agentlin-client 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of agentlin-client might be problematic. See the advisory details below.

Files changed (123)
  1. agentlin_client/__init__.py +90 -0
  2. agentlin_client/_base_client.py +1995 -0
  3. agentlin_client/_client.py +403 -0
  4. agentlin_client/_compat.py +219 -0
  5. agentlin_client/_constants.py +14 -0
  6. agentlin_client/_exceptions.py +108 -0
  7. agentlin_client/_files.py +123 -0
  8. agentlin_client/_models.py +835 -0
  9. agentlin_client/_qs.py +150 -0
  10. agentlin_client/_resource.py +43 -0
  11. agentlin_client/_response.py +832 -0
  12. agentlin_client/_streaming.py +333 -0
  13. agentlin_client/_types.py +260 -0
  14. agentlin_client/_utils/__init__.py +64 -0
  15. agentlin_client/_utils/_compat.py +45 -0
  16. agentlin_client/_utils/_datetime_parse.py +136 -0
  17. agentlin_client/_utils/_logs.py +25 -0
  18. agentlin_client/_utils/_proxy.py +65 -0
  19. agentlin_client/_utils/_reflection.py +42 -0
  20. agentlin_client/_utils/_resources_proxy.py +24 -0
  21. agentlin_client/_utils/_streams.py +12 -0
  22. agentlin_client/_utils/_sync.py +86 -0
  23. agentlin_client/_utils/_transform.py +457 -0
  24. agentlin_client/_utils/_typing.py +156 -0
  25. agentlin_client/_utils/_utils.py +421 -0
  26. agentlin_client/_version.py +4 -0
  27. agentlin_client/lib/.keep +4 -0
  28. agentlin_client/py.typed +0 -0
  29. agentlin_client/resources/__init__.py +33 -0
  30. agentlin_client/resources/conversations/__init__.py +33 -0
  31. agentlin_client/resources/conversations/conversations.py +489 -0
  32. agentlin_client/resources/conversations/items.py +558 -0
  33. agentlin_client/resources/responses.py +1136 -0
  34. agentlin_client/types/__init__.py +22 -0
  35. agentlin_client/types/conversation_create_params.py +28 -0
  36. agentlin_client/types/conversation_delete_response.py +15 -0
  37. agentlin_client/types/conversation_update_params.py +20 -0
  38. agentlin_client/types/conversations/__init__.py +74 -0
  39. agentlin_client/types/conversations/code_interpreter_tool_call.py +55 -0
  40. agentlin_client/types/conversations/code_interpreter_tool_call_param.py +54 -0
  41. agentlin_client/types/conversations/computer_screenshot_image.py +22 -0
  42. agentlin_client/types/conversations/computer_screenshot_image_param.py +21 -0
  43. agentlin_client/types/conversations/computer_tool_call.py +201 -0
  44. agentlin_client/types/conversations/computer_tool_call_output_resource.py +37 -0
  45. agentlin_client/types/conversations/computer_tool_call_param.py +199 -0
  46. agentlin_client/types/conversations/computer_tool_call_safety_check.py +16 -0
  47. agentlin_client/types/conversations/computer_tool_call_safety_check_param.py +18 -0
  48. agentlin_client/types/conversations/conversation_item.py +133 -0
  49. agentlin_client/types/conversations/conversation_item_list.py +26 -0
  50. agentlin_client/types/conversations/conversation_resource.py +32 -0
  51. agentlin_client/types/conversations/custom_tool_call.py +25 -0
  52. agentlin_client/types/conversations/custom_tool_call_output.py +26 -0
  53. agentlin_client/types/conversations/custom_tool_call_output_param.py +27 -0
  54. agentlin_client/types/conversations/custom_tool_call_param.py +24 -0
  55. agentlin_client/types/conversations/easy_input_message.py +26 -0
  56. agentlin_client/types/conversations/easy_input_message_param.py +27 -0
  57. agentlin_client/types/conversations/file_search_tool_call.py +42 -0
  58. agentlin_client/types/conversations/file_search_tool_call_param.py +44 -0
  59. agentlin_client/types/conversations/function_and_custom_tool_call_output.py +15 -0
  60. agentlin_client/types/conversations/function_and_custom_tool_call_output_param.py +16 -0
  61. agentlin_client/types/conversations/function_call_item_status.py +7 -0
  62. agentlin_client/types/conversations/function_tool_call.py +32 -0
  63. agentlin_client/types/conversations/function_tool_call_output_resource.py +33 -0
  64. agentlin_client/types/conversations/function_tool_call_param.py +31 -0
  65. agentlin_client/types/conversations/function_tool_call_resource.py +10 -0
  66. agentlin_client/types/conversations/image_gen_tool_call.py +22 -0
  67. agentlin_client/types/conversations/image_gen_tool_call_param.py +22 -0
  68. agentlin_client/types/conversations/includable.py +14 -0
  69. agentlin_client/types/conversations/input_content.py +32 -0
  70. agentlin_client/types/conversations/input_content_param.py +30 -0
  71. agentlin_client/types/conversations/input_file_content.py +25 -0
  72. agentlin_client/types/conversations/input_file_content_param.py +25 -0
  73. agentlin_client/types/conversations/input_image_content.py +28 -0
  74. agentlin_client/types/conversations/input_image_content_param.py +28 -0
  75. agentlin_client/types/conversations/input_item.py +209 -0
  76. agentlin_client/types/conversations/input_item_param.py +203 -0
  77. agentlin_client/types/conversations/input_message.py +30 -0
  78. agentlin_client/types/conversations/input_message_param.py +31 -0
  79. agentlin_client/types/conversations/input_text_content.py +15 -0
  80. agentlin_client/types/conversations/input_text_content_param.py +15 -0
  81. agentlin_client/types/conversations/item_create_params.py +24 -0
  82. agentlin_client/types/conversations/item_list_params.py +50 -0
  83. agentlin_client/types/conversations/item_retrieve_params.py +22 -0
  84. agentlin_client/types/conversations/local_shell_tool_call.py +45 -0
  85. agentlin_client/types/conversations/local_shell_tool_call_output.py +22 -0
  86. agentlin_client/types/conversations/local_shell_tool_call_output_param.py +22 -0
  87. agentlin_client/types/conversations/local_shell_tool_call_param.py +47 -0
  88. agentlin_client/types/conversations/mcp_approval_request.py +24 -0
  89. agentlin_client/types/conversations/mcp_approval_request_param.py +24 -0
  90. agentlin_client/types/conversations/mcp_approval_response_resource.py +25 -0
  91. agentlin_client/types/conversations/mcp_list_tools.py +39 -0
  92. agentlin_client/types/conversations/mcp_list_tools_param.py +39 -0
  93. agentlin_client/types/conversations/mcp_tool_call.py +44 -0
  94. agentlin_client/types/conversations/mcp_tool_call_param.py +44 -0
  95. agentlin_client/types/conversations/output_message.py +34 -0
  96. agentlin_client/types/conversations/output_message_param.py +34 -0
  97. agentlin_client/types/conversations/output_text_content.py +117 -0
  98. agentlin_client/types/conversations/output_text_content_param.py +115 -0
  99. agentlin_client/types/conversations/reasoning_item.py +44 -0
  100. agentlin_client/types/conversations/reasoning_item_param.py +45 -0
  101. agentlin_client/types/conversations/reasoning_text_content.py +15 -0
  102. agentlin_client/types/conversations/reasoning_text_content_param.py +15 -0
  103. agentlin_client/types/conversations/refusal_content.py +15 -0
  104. agentlin_client/types/conversations/refusal_content_param.py +15 -0
  105. agentlin_client/types/conversations/web_search_tool_call.py +67 -0
  106. agentlin_client/types/conversations/web_search_tool_call_param.py +66 -0
  107. agentlin_client/types/mcp_tool_filter.py +20 -0
  108. agentlin_client/types/mcp_tool_filter_param.py +22 -0
  109. agentlin_client/types/model_response_properties_standard.py +87 -0
  110. agentlin_client/types/response.py +166 -0
  111. agentlin_client/types/response_create_params.py +497 -0
  112. agentlin_client/types/response_list_input_items_params.py +34 -0
  113. agentlin_client/types/response_list_input_items_response.py +70 -0
  114. agentlin_client/types/response_properties.py +328 -0
  115. agentlin_client/types/response_retrieve_params.py +42 -0
  116. agentlin_client/types/response_tool.py +495 -0
  117. agentlin_client/types/response_tool_param.py +491 -0
  118. agentlin_client/types/text_response_format_configuration.py +59 -0
  119. agentlin_client/types/text_response_format_configuration_param.py +54 -0
  120. agentlin_client-0.1.0.dist-info/METADATA +429 -0
  121. agentlin_client-0.1.0.dist-info/RECORD +123 -0
  122. agentlin_client-0.1.0.dist-info/WHEEL +4 -0
  123. agentlin_client-0.1.0.dist-info/licenses/LICENSE +7 -0
@@ -0,0 +1,1136 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Dict, List, Union, Iterable, Optional
6
+ from typing_extensions import Literal
7
+
8
+ import httpx
9
+
10
+ from ..types import response_create_params, response_retrieve_params, response_list_input_items_params
11
+ from .._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
12
+ from .._utils import maybe_transform, async_maybe_transform
13
+ from .._compat import cached_property
14
+ from .._resource import SyncAPIResource, AsyncAPIResource
15
+ from .._response import (
16
+ to_raw_response_wrapper,
17
+ to_streamed_response_wrapper,
18
+ async_to_raw_response_wrapper,
19
+ async_to_streamed_response_wrapper,
20
+ )
21
+ from .._base_client import make_request_options
22
+ from ..types.response import Response
23
+ from ..types.response_tool_param import ResponseToolParam
24
+ from ..types.conversations.includable import Includable
25
+ from ..types.conversations.input_item_param import InputItemParam
26
+ from ..types.response_list_input_items_response import ResponseListInputItemsResponse
27
+
28
+ __all__ = ["ResponsesResource", "AsyncResponsesResource"]
29
+
30
+
31
class ResponsesResource(SyncAPIResource):
    """Synchronous resource for the `/responses` API endpoints.

    Wraps create/retrieve/delete/cancel of model responses plus listing a
    response's input items. Generated client code; request bodies and query
    strings are built via `maybe_transform` against the corresponding
    `*_params` TypedDicts.
    """

    @cached_property
    def with_raw_response(self) -> ResponsesResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/LinXueyuanStdio/agentlin-client-python#accessing-raw-response-data-eg-headers
        """
        return ResponsesResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ResponsesResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/LinXueyuanStdio/agentlin-client-python#with_streaming_response
        """
        return ResponsesResourceWithStreamingResponse(self)

    def create(
        self,
        *,
        background: Optional[bool] | Omit = omit,
        conversation: Optional[response_create_params.Conversation] | Omit = omit,
        include: Optional[List[Includable]] | Omit = omit,
        input: Union[str, Iterable[InputItemParam]] | Omit = omit,
        instructions: Optional[str] | Omit = omit,
        max_output_tokens: Optional[int] | Omit = omit,
        max_tool_calls: Optional[int] | Omit = omit,
        metadata: Optional[Dict[str, str]] | Omit = omit,
        model: Literal[
            "o1-pro",
            "o1-pro-2025-03-19",
            "o3-pro",
            "o3-pro-2025-06-10",
            "o3-deep-research",
            "o3-deep-research-2025-06-26",
            "o4-mini-deep-research",
            "o4-mini-deep-research-2025-06-26",
            "computer-use-preview",
            "computer-use-preview-2025-03-11",
            "gpt-5-codex",
            "gpt-5-pro",
            "gpt-5-pro-2025-10-06",
        ]
        | Omit = omit,
        parallel_tool_calls: Optional[bool] | Omit = omit,
        previous_response_id: Optional[str] | Omit = omit,
        prompt: Optional[response_create_params.Prompt] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        reasoning: Optional[response_create_params.Reasoning] | Omit = omit,
        safety_identifier: str | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream: Optional[bool] | Omit = omit,
        stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        text: response_create_params.Text | Omit = omit,
        tool_choice: response_create_params.ToolChoice | Omit = omit,
        tools: Iterable[ResponseToolParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Response:
        """Creates a model response.

        Provide
        [text](https://platform.openai.com/docs/guides/text) or
        [image](https://platform.openai.com/docs/guides/images) inputs to generate
        [text](https://platform.openai.com/docs/guides/text) or
        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
        the model call your own
        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
        built-in [tools](https://platform.openai.com/docs/guides/tools) like
        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
        your own data as input for the model's response.

        Args:
          background: Whether to run the model response in the background.
              [Learn more](https://platform.openai.com/docs/guides/background).

          conversation: The conversation that this response belongs to. Items from this conversation are
              prepended to `input_items` for this response request. Input items and output
              items from this response are automatically added to this conversation after this
              response completes.

          include: Specify additional output data to include in the model response. Currently
              supported values are:

              - `web_search_call.action.sources`: Include the sources of the web search tool
                call.
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.
              - `computer_call_output.output.image_url`: Include image urls from the computer
                call output.
              - `file_search_call.results`: Include the search results of the file search tool
                call.
              - `message.input_image.image_url`: Include image urls from the input message.
              - `message.output_text.logprobs`: Include logprobs with assistant messages.
              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
                tokens in reasoning item outputs. This enables reasoning items to be used in
                multi-turn conversations when using the Responses API statelessly (like when
                the `store` parameter is set to `false`, or when an organization is enrolled
                in the zero data retention program).

          input: Text, image, or file inputs to the model, used to generate a response.

              Learn more:

              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Image inputs](https://platform.openai.com/docs/guides/images)
              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
              - [Function calling](https://platform.openai.com/docs/guides/function-calling)

          instructions: A system (or developer) message inserted into the model's context.

              When using along with `previous_response_id`, the instructions from a previous
              response will not be carried over to the next response. This makes it simple to
              swap out system (or developer) messages in new responses.

          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
              including visible output tokens and
              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).

          max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
              response. This maximum number applies across all built-in tool calls, not per
              individual tool. Any further attempts to call a tool by the model will be
              ignored.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
              wide range of models with different capabilities, performance characteristics,
              and price points. Refer to the
              [model guide](https://platform.openai.com/docs/models) to browse and compare
              available models.

          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
              Cannot be used in conjunction with `conversation`.

          prompt: Reference to a prompt template and its variables.
              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).

          prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
              hit rates. Replaces the `user` field.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching).

          reasoning: **gpt-5 and o-series models only**

              Configuration options for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning).

          safety_identifier: A stable identifier used to help detect users of your application that may be
              violating OpenAI's usage policies. The IDs should be a string that uniquely
              identifies each user. We recommend hashing their username or email address, in
              order to avoid sending us any identifying information.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).

          service_tier: Specifies the processing type used for serving the request.

              - If set to 'auto', then the request will be processed with the service tier
                configured in the Project settings. Unless otherwise configured, the Project
                will use 'default'.
              - If set to 'default', then the request will be processed with the standard
                pricing and performance for the selected model.
              - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                '[priority](https://openai.com/api-priority-processing/)', then the request
                will be processed with the corresponding service tier.
              - When not set, the default behavior is 'auto'.

              When the `service_tier` parameter is set, the response body will include the
              `service_tier` value based on the processing mode actually used to serve the
              request. This response value may be different from the value set in the
              parameter.

          store: Whether to store the generated model response for later retrieval via API.

          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.

          stream_options: Options for streaming responses. Only set this when you set `stream: true`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic. We generally recommend altering this or `top_p` but
              not both.

          text: Configuration options for a text response from the model. Can be plain text or
              structured JSON data. Learn more:

              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)

          tool_choice: How the model should select which tool (or tools) to use when generating a
              response. See the `tools` parameter to see how to specify which tools the model
              can call.

          tools: An array of tools the model may call while generating a response. You can
              specify which tool to use by setting the `tool_choice` parameter.

              We support the following categories of tools:

              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
                capabilities, like
                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
                [file search](https://platform.openai.com/docs/guides/tools-file-search).
                Learn more about
                [built-in tools](https://platform.openai.com/docs/guides/tools).
              - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
                predefined connectors such as Google Drive and SharePoint. Learn more about
                [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
              - **Function calls (custom tools)**: Functions that are defined by you, enabling
                the model to call your own code with strongly typed arguments and outputs.
                Learn more about
                [function calling](https://platform.openai.com/docs/guides/function-calling).
                You can also use custom tools to call your own code.

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          truncation: The truncation strategy to use for the model response.

              - `auto`: If the input to this Response exceeds the model's context window size,
                the model will truncate the response to fit the context window by dropping
                items from the beginning of the conversation.
              - `disabled` (default): If the input size will exceed the context window size
                for a model, the request will fail with a 400 error.

          user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
              `prompt_cache_key` instead to maintain caching optimizations. A stable
              identifier for your end-users. Used to boost cache hit rates by better bucketing
              similar requests and to help OpenAI detect and prevent abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Omitted params are stripped by maybe_transform, so only explicitly
        # provided fields end up in the JSON body.
        return self._post(
            "/responses",
            body=maybe_transform(
                {
                    "background": background,
                    "conversation": conversation,
                    "include": include,
                    "input": input,
                    "instructions": instructions,
                    "max_output_tokens": max_output_tokens,
                    "max_tool_calls": max_tool_calls,
                    "metadata": metadata,
                    "model": model,
                    "parallel_tool_calls": parallel_tool_calls,
                    "previous_response_id": previous_response_id,
                    "prompt": prompt,
                    "prompt_cache_key": prompt_cache_key,
                    "reasoning": reasoning,
                    "safety_identifier": safety_identifier,
                    "service_tier": service_tier,
                    "store": store,
                    "stream": stream,
                    "stream_options": stream_options,
                    "temperature": temperature,
                    "text": text,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "truncation": truncation,
                    "user": user,
                },
                response_create_params.ResponseCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Response,
        )

    def retrieve(
        self,
        response_id: str,
        *,
        include: List[Includable] | Omit = omit,
        include_obfuscation: bool | Omit = omit,
        starting_after: int | Omit = omit,
        stream: bool | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Response:
        """
        Retrieves a model response with the given ID.

        Args:
          include: Additional fields to include in the response. See the `include` parameter for
              Response creation above for more information.

          include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
              characters to an `obfuscation` field on streaming delta events to normalize
              payload sizes as a mitigation to certain side-channel attacks. These obfuscation
              fields are included by default, but add a small amount of overhead to the data
              stream. You can set `include_obfuscation` to false to optimize for bandwidth if
              you trust the network links between your application and the OpenAI API.

          starting_after: The sequence number of the event after which to start streaming.

          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not response_id:
            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
        return self._get(
            f"/responses/{response_id}",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "include": include,
                        "include_obfuscation": include_obfuscation,
                        "starting_after": starting_after,
                        "stream": stream,
                    },
                    response_retrieve_params.ResponseRetrieveParams,
                ),
            ),
            cast_to=Response,
        )

    def delete(
        self,
        response_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Deletes a model response with the given ID.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not response_id:
            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
        # The endpoint returns no body; accept any content type and cast to None.
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._delete(
            f"/responses/{response_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )

    def cancel(
        self,
        response_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Response:
        """Cancels a model response with the given ID.

        Only responses created with the
        `background` parameter set to `true` can be cancelled.
        [Learn more](https://platform.openai.com/docs/guides/background).

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not response_id:
            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
        return self._post(
            f"/responses/{response_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Response,
        )

    def list_input_items(
        self,
        response_id: str,
        *,
        after: str | Omit = omit,
        include: List[Includable] | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ResponseListInputItemsResponse:
        """
        Returns a list of input items for a given response.

        Args:
          after: An item ID to list items after, used in pagination.

          include: Additional fields to include in the response. See the `include` parameter for
              Response creation above for more information.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: The order to return the input items in. Default is `desc`.

              - `asc`: Return the input items in ascending order.
              - `desc`: Return the input items in descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not response_id:
            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
        return self._get(
            f"/responses/{response_id}/input_items",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "include": include,
                        "limit": limit,
                        "order": order,
                    },
                    response_list_input_items_params.ResponseListInputItemsParams,
                ),
            ),
            cast_to=ResponseListInputItemsResponse,
        )
541
+
542
+
543
class AsyncResponsesResource(AsyncAPIResource):
    """Asynchronous client surface for the `/responses` API endpoints."""

    @cached_property
    def with_raw_response(self) -> AsyncResponsesResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/LinXueyuanStdio/agentlin-client-python#accessing-raw-response-data-eg-headers
        """
        return AsyncResponsesResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncResponsesResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/LinXueyuanStdio/agentlin-client-python#with_streaming_response
        """
        return AsyncResponsesResourceWithStreamingResponse(self)
562
+
563
+ async def create(
564
+ self,
565
+ *,
566
+ background: Optional[bool] | Omit = omit,
567
+ conversation: Optional[response_create_params.Conversation] | Omit = omit,
568
+ include: Optional[List[Includable]] | Omit = omit,
569
+ input: Union[str, Iterable[InputItemParam]] | Omit = omit,
570
+ instructions: Optional[str] | Omit = omit,
571
+ max_output_tokens: Optional[int] | Omit = omit,
572
+ max_tool_calls: Optional[int] | Omit = omit,
573
+ metadata: Optional[Dict[str, str]] | Omit = omit,
574
+ model: Literal[
575
+ "o1-pro",
576
+ "o1-pro-2025-03-19",
577
+ "o3-pro",
578
+ "o3-pro-2025-06-10",
579
+ "o3-deep-research",
580
+ "o3-deep-research-2025-06-26",
581
+ "o4-mini-deep-research",
582
+ "o4-mini-deep-research-2025-06-26",
583
+ "computer-use-preview",
584
+ "computer-use-preview-2025-03-11",
585
+ "gpt-5-codex",
586
+ "gpt-5-pro",
587
+ "gpt-5-pro-2025-10-06",
588
+ ]
589
+ | Omit = omit,
590
+ parallel_tool_calls: Optional[bool] | Omit = omit,
591
+ previous_response_id: Optional[str] | Omit = omit,
592
+ prompt: Optional[response_create_params.Prompt] | Omit = omit,
593
+ prompt_cache_key: str | Omit = omit,
594
+ reasoning: Optional[response_create_params.Reasoning] | Omit = omit,
595
+ safety_identifier: str | Omit = omit,
596
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
597
+ store: Optional[bool] | Omit = omit,
598
+ stream: Optional[bool] | Omit = omit,
599
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
600
+ temperature: Optional[float] | Omit = omit,
601
+ text: response_create_params.Text | Omit = omit,
602
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
603
+ tools: Iterable[ResponseToolParam] | Omit = omit,
604
+ top_logprobs: Optional[int] | Omit = omit,
605
+ top_p: Optional[float] | Omit = omit,
606
+ truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
607
+ user: str | Omit = omit,
608
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
609
+ # The extra values given here take precedence over values defined on the client or passed to this method.
610
+ extra_headers: Headers | None = None,
611
+ extra_query: Query | None = None,
612
+ extra_body: Body | None = None,
613
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
614
+ ) -> Response:
615
+ """Creates a model response.
616
+
617
+ Provide
618
+ [text](https://platform.openai.com/docs/guides/text) or
619
+ [image](https://platform.openai.com/docs/guides/images) inputs to generate
620
+ [text](https://platform.openai.com/docs/guides/text) or
621
+ [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
622
+ the model call your own
623
+ [custom code](https://platform.openai.com/docs/guides/function-calling) or use
624
+ built-in [tools](https://platform.openai.com/docs/guides/tools) like
625
+ [web search](https://platform.openai.com/docs/guides/tools-web-search) or
626
+ [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
627
+ your own data as input for the model's response.
628
+
629
+ Args:
630
+ background: Whether to run the model response in the background.
631
+ [Learn more](https://platform.openai.com/docs/guides/background).
632
+
633
+ conversation: The conversation that this response belongs to. Items from this conversation are
634
+ prepended to `input_items` for this response request. Input items and output
635
+ items from this response are automatically added to this conversation after this
636
+ response completes.
637
+
638
+ include: Specify additional output data to include in the model response. Currently
639
+ supported values are:
640
+
641
+ - `web_search_call.action.sources`: Include the sources of the web search tool
642
+ call.
643
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
644
+ in code interpreter tool call items.
645
+ - `computer_call_output.output.image_url`: Include image urls from the computer
646
+ call output.
647
+ - `file_search_call.results`: Include the search results of the file search tool
648
+ call.
649
+ - `message.input_image.image_url`: Include image urls from the input message.
650
+ - `message.output_text.logprobs`: Include logprobs with assistant messages.
651
+ - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
652
+ tokens in reasoning item outputs. This enables reasoning items to be used in
653
+ multi-turn conversations when using the Responses API statelessly (like when
654
+ the `store` parameter is set to `false`, or when an organization is enrolled
655
+ in the zero data retention program).
656
+
657
+ input: Text, image, or file inputs to the model, used to generate a response.
658
+
659
+ Learn more:
660
+
661
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
662
+ - [Image inputs](https://platform.openai.com/docs/guides/images)
663
+ - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
664
+ - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
665
+ - [Function calling](https://platform.openai.com/docs/guides/function-calling)
666
+
667
+ instructions: A system (or developer) message inserted into the model's context.
668
+
669
+ When using along with `previous_response_id`, the instructions from a previous
670
+ response will not be carried over to the next response. This makes it simple to
671
+ swap out system (or developer) messages in new responses.
672
+
673
+ max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
674
+ including visible output tokens and
675
+ [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
676
+
677
+ max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
678
+ response. This maximum number applies across all built-in tool calls, not per
679
+ individual tool. Any further attempts to call a tool by the model will be
680
+ ignored.
681
+
682
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
683
+ for storing additional information about the object in a structured format, and
684
+ querying for objects via API or the dashboard.
685
+
686
+ Keys are strings with a maximum length of 64 characters. Values are strings with
687
+ a maximum length of 512 characters.
688
+
689
+ model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
690
+ wide range of models with different capabilities, performance characteristics,
691
+ and price points. Refer to the
692
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
693
+ available models.
694
+
695
+ parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
696
+
697
+ previous_response_id: The unique ID of the previous response to the model. Use this to create
698
+ multi-turn conversations. Learn more about
699
+ [conversation state](https://platform.openai.com/docs/guides/conversation-state).
700
+ Cannot be used in conjunction with `conversation`.
701
+
702
+ prompt: Reference to a prompt template and its variables.
703
+ [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
704
+
705
+ prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
706
+ hit rates. Replaces the `user` field.
707
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
708
+
709
+ reasoning: **gpt-5 and o-series models only**
710
+
711
+ Configuration options for
712
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning).
713
+
714
+ safety_identifier: A stable identifier used to help detect users of your application that may be
715
+ violating OpenAI's usage policies. The IDs should be a string that uniquely
716
+ identifies each user. We recommend hashing their username or email address, in
717
+ order to avoid sending us any identifying information.
718
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
719
+
720
+ service_tier: Specifies the processing type used for serving the request.
721
+
722
+ - If set to 'auto', then the request will be processed with the service tier
723
+ configured in the Project settings. Unless otherwise configured, the Project
724
+ will use 'default'.
725
+ - If set to 'default', then the request will be processed with the standard
726
+ pricing and performance for the selected model.
727
+ - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
728
+ '[priority](https://openai.com/api-priority-processing/)', then the request
729
+ will be processed with the corresponding service tier.
730
+ - When not set, the default behavior is 'auto'.
731
+
732
+ When the `service_tier` parameter is set, the response body will include the
733
+ `service_tier` value based on the processing mode actually used to serve the
734
+ request. This response value may be different from the value set in the
735
+ parameter.
736
+
737
+ store: Whether to store the generated model response for later retrieval via API.
738
+
739
+ stream: If set to true, the model response data will be streamed to the client as it is
740
+ generated using
741
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
742
+ See the
743
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
744
+ for more information.
745
+
746
+ stream_options: Options for streaming responses. Only set this when you set `stream: true`.
747
+
748
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
749
+ make the output more random, while lower values like 0.2 will make it more
750
+ focused and deterministic. We generally recommend altering this or `top_p` but
751
+ not both.
752
+
753
+ text: Configuration options for a text response from the model. Can be plain text or
754
+ structured JSON data. Learn more:
755
+
756
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
757
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
758
+
759
+ tool_choice: How the model should select which tool (or tools) to use when generating a
760
+ response. See the `tools` parameter to see how to specify which tools the model
761
+ can call.
762
+
763
+ tools: An array of tools the model may call while generating a response. You can
764
+ specify which tool to use by setting the `tool_choice` parameter.
765
+
766
+ We support the following categories of tools:
767
+
768
+ - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
769
+ capabilities, like
770
+ [web search](https://platform.openai.com/docs/guides/tools-web-search) or
771
+ [file search](https://platform.openai.com/docs/guides/tools-file-search).
772
+ Learn more about
773
+ [built-in tools](https://platform.openai.com/docs/guides/tools).
774
+ - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
775
+ predefined connectors such as Google Drive and SharePoint. Learn more about
776
+ [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
777
+ - **Function calls (custom tools)**: Functions that are defined by you, enabling
778
+ the model to call your own code with strongly typed arguments and outputs.
779
+ Learn more about
780
+ [function calling](https://platform.openai.com/docs/guides/function-calling).
781
+ You can also use custom tools to call your own code.
782
+
783
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
784
+ return at each token position, each with an associated log probability.
785
+
786
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
787
+ model considers the results of the tokens with top_p probability mass. So 0.1
788
+ means only the tokens comprising the top 10% probability mass are considered.
789
+
790
+ We generally recommend altering this or `temperature` but not both.
791
+
792
+ truncation: The truncation strategy to use for the model response.
793
+
794
+ - `auto`: If the input to this Response exceeds the model's context window size,
795
+ the model will truncate the response to fit the context window by dropping
796
+ items from the beginning of the conversation.
797
+ - `disabled` (default): If the input size will exceed the context window size
798
+ for a model, the request will fail with a 400 error.
799
+
800
+ user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
801
+ `prompt_cache_key` instead to maintain caching optimizations. A stable
802
+ identifier for your end-users. Used to boost cache hit rates by better bucketing
803
+ similar requests and to help OpenAI detect and prevent abuse.
804
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
805
+
806
+ extra_headers: Send extra headers
807
+
808
+ extra_query: Add additional query parameters to the request
809
+
810
+ extra_body: Add additional JSON properties to the request
811
+
812
+ timeout: Override the client-level default timeout for this request, in seconds
813
+ """
814
+ return await self._post(
815
+ "/responses",
816
+ body=await async_maybe_transform(
817
+ {
818
+ "background": background,
819
+ "conversation": conversation,
820
+ "include": include,
821
+ "input": input,
822
+ "instructions": instructions,
823
+ "max_output_tokens": max_output_tokens,
824
+ "max_tool_calls": max_tool_calls,
825
+ "metadata": metadata,
826
+ "model": model,
827
+ "parallel_tool_calls": parallel_tool_calls,
828
+ "previous_response_id": previous_response_id,
829
+ "prompt": prompt,
830
+ "prompt_cache_key": prompt_cache_key,
831
+ "reasoning": reasoning,
832
+ "safety_identifier": safety_identifier,
833
+ "service_tier": service_tier,
834
+ "store": store,
835
+ "stream": stream,
836
+ "stream_options": stream_options,
837
+ "temperature": temperature,
838
+ "text": text,
839
+ "tool_choice": tool_choice,
840
+ "tools": tools,
841
+ "top_logprobs": top_logprobs,
842
+ "top_p": top_p,
843
+ "truncation": truncation,
844
+ "user": user,
845
+ },
846
+ response_create_params.ResponseCreateParams,
847
+ ),
848
+ options=make_request_options(
849
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
850
+ ),
851
+ cast_to=Response,
852
+ )
853
+
854
+ async def retrieve(
855
+ self,
856
+ response_id: str,
857
+ *,
858
+ include: List[Includable] | Omit = omit,
859
+ include_obfuscation: bool | Omit = omit,
860
+ starting_after: int | Omit = omit,
861
+ stream: bool | Omit = omit,
862
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
863
+ # The extra values given here take precedence over values defined on the client or passed to this method.
864
+ extra_headers: Headers | None = None,
865
+ extra_query: Query | None = None,
866
+ extra_body: Body | None = None,
867
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
868
+ ) -> Response:
869
+ """
870
+ Retrieves a model response with the given ID.
871
+
872
+ Args:
873
+ include: Additional fields to include in the response. See the `include` parameter for
874
+ Response creation above for more information.
875
+
876
+ include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
877
+ characters to an `obfuscation` field on streaming delta events to normalize
878
+ payload sizes as a mitigation to certain side-channel attacks. These obfuscation
879
+ fields are included by default, but add a small amount of overhead to the data
880
+ stream. You can set `include_obfuscation` to false to optimize for bandwidth if
881
+ you trust the network links between your application and the OpenAI API.
882
+
883
+ starting_after: The sequence number of the event after which to start streaming.
884
+
885
+ stream: If set to true, the model response data will be streamed to the client as it is
886
+ generated using
887
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
888
+ See the
889
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
890
+ for more information.
891
+
892
+ extra_headers: Send extra headers
893
+
894
+ extra_query: Add additional query parameters to the request
895
+
896
+ extra_body: Add additional JSON properties to the request
897
+
898
+ timeout: Override the client-level default timeout for this request, in seconds
899
+ """
900
+ if not response_id:
901
+ raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
902
+ return await self._get(
903
+ f"/responses/{response_id}",
904
+ options=make_request_options(
905
+ extra_headers=extra_headers,
906
+ extra_query=extra_query,
907
+ extra_body=extra_body,
908
+ timeout=timeout,
909
+ query=await async_maybe_transform(
910
+ {
911
+ "include": include,
912
+ "include_obfuscation": include_obfuscation,
913
+ "starting_after": starting_after,
914
+ "stream": stream,
915
+ },
916
+ response_retrieve_params.ResponseRetrieveParams,
917
+ ),
918
+ ),
919
+ cast_to=Response,
920
+ )
921
+
922
+ async def delete(
923
+ self,
924
+ response_id: str,
925
+ *,
926
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
927
+ # The extra values given here take precedence over values defined on the client or passed to this method.
928
+ extra_headers: Headers | None = None,
929
+ extra_query: Query | None = None,
930
+ extra_body: Body | None = None,
931
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
932
+ ) -> None:
933
+ """
934
+ Deletes a model response with the given ID.
935
+
936
+ Args:
937
+ extra_headers: Send extra headers
938
+
939
+ extra_query: Add additional query parameters to the request
940
+
941
+ extra_body: Add additional JSON properties to the request
942
+
943
+ timeout: Override the client-level default timeout for this request, in seconds
944
+ """
945
+ if not response_id:
946
+ raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
947
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
948
+ return await self._delete(
949
+ f"/responses/{response_id}",
950
+ options=make_request_options(
951
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
952
+ ),
953
+ cast_to=NoneType,
954
+ )
955
+
956
+ async def cancel(
957
+ self,
958
+ response_id: str,
959
+ *,
960
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
961
+ # The extra values given here take precedence over values defined on the client or passed to this method.
962
+ extra_headers: Headers | None = None,
963
+ extra_query: Query | None = None,
964
+ extra_body: Body | None = None,
965
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
966
+ ) -> Response:
967
+ """Cancels a model response with the given ID.
968
+
969
+ Only responses created with the
970
+ `background` parameter set to `true` can be cancelled.
971
+ [Learn more](https://platform.openai.com/docs/guides/background).
972
+
973
+ Args:
974
+ extra_headers: Send extra headers
975
+
976
+ extra_query: Add additional query parameters to the request
977
+
978
+ extra_body: Add additional JSON properties to the request
979
+
980
+ timeout: Override the client-level default timeout for this request, in seconds
981
+ """
982
+ if not response_id:
983
+ raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
984
+ return await self._post(
985
+ f"/responses/{response_id}/cancel",
986
+ options=make_request_options(
987
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
988
+ ),
989
+ cast_to=Response,
990
+ )
991
+
992
+ async def list_input_items(
993
+ self,
994
+ response_id: str,
995
+ *,
996
+ after: str | Omit = omit,
997
+ include: List[Includable] | Omit = omit,
998
+ limit: int | Omit = omit,
999
+ order: Literal["asc", "desc"] | Omit = omit,
1000
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1001
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1002
+ extra_headers: Headers | None = None,
1003
+ extra_query: Query | None = None,
1004
+ extra_body: Body | None = None,
1005
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
1006
+ ) -> ResponseListInputItemsResponse:
1007
+ """
1008
+ Returns a list of input items for a given response.
1009
+
1010
+ Args:
1011
+ after: An item ID to list items after, used in pagination.
1012
+
1013
+ include: Additional fields to include in the response. See the `include` parameter for
1014
+ Response creation above for more information.
1015
+
1016
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
1017
+ 100, and the default is 20.
1018
+
1019
+ order: The order to return the input items in. Default is `desc`.
1020
+
1021
+ - `asc`: Return the input items in ascending order.
1022
+ - `desc`: Return the input items in descending order.
1023
+
1024
+ extra_headers: Send extra headers
1025
+
1026
+ extra_query: Add additional query parameters to the request
1027
+
1028
+ extra_body: Add additional JSON properties to the request
1029
+
1030
+ timeout: Override the client-level default timeout for this request, in seconds
1031
+ """
1032
+ if not response_id:
1033
+ raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
1034
+ return await self._get(
1035
+ f"/responses/{response_id}/input_items",
1036
+ options=make_request_options(
1037
+ extra_headers=extra_headers,
1038
+ extra_query=extra_query,
1039
+ extra_body=extra_body,
1040
+ timeout=timeout,
1041
+ query=await async_maybe_transform(
1042
+ {
1043
+ "after": after,
1044
+ "include": include,
1045
+ "limit": limit,
1046
+ "order": order,
1047
+ },
1048
+ response_list_input_items_params.ResponseListInputItemsParams,
1049
+ ),
1050
+ ),
1051
+ cast_to=ResponseListInputItemsResponse,
1052
+ )
1053
+
1054
+
1055
class ResponsesResourceWithRawResponse:
    """Wraps each `ResponsesResource` method so calls return the raw response object instead of the parsed content."""

    def __init__(self, responses: ResponsesResource) -> None:
        self._responses = responses

        self.create = to_raw_response_wrapper(
            responses.create,
        )
        self.retrieve = to_raw_response_wrapper(
            responses.retrieve,
        )
        self.delete = to_raw_response_wrapper(
            responses.delete,
        )
        self.cancel = to_raw_response_wrapper(
            responses.cancel,
        )
        self.list_input_items = to_raw_response_wrapper(
            responses.list_input_items,
        )
1074
+
1075
+
1076
class AsyncResponsesResourceWithRawResponse:
    """Wraps each `AsyncResponsesResource` method so calls return the raw response object instead of the parsed content."""

    def __init__(self, responses: AsyncResponsesResource) -> None:
        self._responses = responses

        self.create = async_to_raw_response_wrapper(
            responses.create,
        )
        self.retrieve = async_to_raw_response_wrapper(
            responses.retrieve,
        )
        self.delete = async_to_raw_response_wrapper(
            responses.delete,
        )
        self.cancel = async_to_raw_response_wrapper(
            responses.cancel,
        )
        self.list_input_items = async_to_raw_response_wrapper(
            responses.list_input_items,
        )
1095
+
1096
+
1097
class ResponsesResourceWithStreamingResponse:
    """Wraps each `ResponsesResource` method so calls return a streamed response that doesn't eagerly read the body."""

    def __init__(self, responses: ResponsesResource) -> None:
        self._responses = responses

        self.create = to_streamed_response_wrapper(
            responses.create,
        )
        self.retrieve = to_streamed_response_wrapper(
            responses.retrieve,
        )
        self.delete = to_streamed_response_wrapper(
            responses.delete,
        )
        self.cancel = to_streamed_response_wrapper(
            responses.cancel,
        )
        self.list_input_items = to_streamed_response_wrapper(
            responses.list_input_items,
        )
1116
+
1117
+
1118
class AsyncResponsesResourceWithStreamingResponse:
    """Wraps each `AsyncResponsesResource` method so calls return a streamed response that doesn't eagerly read the body."""

    def __init__(self, responses: AsyncResponsesResource) -> None:
        self._responses = responses

        self.create = async_to_streamed_response_wrapper(
            responses.create,
        )
        self.retrieve = async_to_streamed_response_wrapper(
            responses.retrieve,
        )
        self.delete = async_to_streamed_response_wrapper(
            responses.delete,
        )
        self.cancel = async_to_streamed_response_wrapper(
            responses.cancel,
        )
        self.list_input_items = async_to_streamed_response_wrapper(
            responses.list_input_items,
        )