agentlin-client 0.1.0 (agentlin_client-0.1.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of agentlin-client has been flagged as potentially problematic.
- agentlin_client/__init__.py +90 -0
- agentlin_client/_base_client.py +1995 -0
- agentlin_client/_client.py +403 -0
- agentlin_client/_compat.py +219 -0
- agentlin_client/_constants.py +14 -0
- agentlin_client/_exceptions.py +108 -0
- agentlin_client/_files.py +123 -0
- agentlin_client/_models.py +835 -0
- agentlin_client/_qs.py +150 -0
- agentlin_client/_resource.py +43 -0
- agentlin_client/_response.py +832 -0
- agentlin_client/_streaming.py +333 -0
- agentlin_client/_types.py +260 -0
- agentlin_client/_utils/__init__.py +64 -0
- agentlin_client/_utils/_compat.py +45 -0
- agentlin_client/_utils/_datetime_parse.py +136 -0
- agentlin_client/_utils/_logs.py +25 -0
- agentlin_client/_utils/_proxy.py +65 -0
- agentlin_client/_utils/_reflection.py +42 -0
- agentlin_client/_utils/_resources_proxy.py +24 -0
- agentlin_client/_utils/_streams.py +12 -0
- agentlin_client/_utils/_sync.py +86 -0
- agentlin_client/_utils/_transform.py +457 -0
- agentlin_client/_utils/_typing.py +156 -0
- agentlin_client/_utils/_utils.py +421 -0
- agentlin_client/_version.py +4 -0
- agentlin_client/lib/.keep +4 -0
- agentlin_client/py.typed +0 -0
- agentlin_client/resources/__init__.py +33 -0
- agentlin_client/resources/conversations/__init__.py +33 -0
- agentlin_client/resources/conversations/conversations.py +489 -0
- agentlin_client/resources/conversations/items.py +558 -0
- agentlin_client/resources/responses.py +1136 -0
- agentlin_client/types/__init__.py +22 -0
- agentlin_client/types/conversation_create_params.py +28 -0
- agentlin_client/types/conversation_delete_response.py +15 -0
- agentlin_client/types/conversation_update_params.py +20 -0
- agentlin_client/types/conversations/__init__.py +74 -0
- agentlin_client/types/conversations/code_interpreter_tool_call.py +55 -0
- agentlin_client/types/conversations/code_interpreter_tool_call_param.py +54 -0
- agentlin_client/types/conversations/computer_screenshot_image.py +22 -0
- agentlin_client/types/conversations/computer_screenshot_image_param.py +21 -0
- agentlin_client/types/conversations/computer_tool_call.py +201 -0
- agentlin_client/types/conversations/computer_tool_call_output_resource.py +37 -0
- agentlin_client/types/conversations/computer_tool_call_param.py +199 -0
- agentlin_client/types/conversations/computer_tool_call_safety_check.py +16 -0
- agentlin_client/types/conversations/computer_tool_call_safety_check_param.py +18 -0
- agentlin_client/types/conversations/conversation_item.py +133 -0
- agentlin_client/types/conversations/conversation_item_list.py +26 -0
- agentlin_client/types/conversations/conversation_resource.py +32 -0
- agentlin_client/types/conversations/custom_tool_call.py +25 -0
- agentlin_client/types/conversations/custom_tool_call_output.py +26 -0
- agentlin_client/types/conversations/custom_tool_call_output_param.py +27 -0
- agentlin_client/types/conversations/custom_tool_call_param.py +24 -0
- agentlin_client/types/conversations/easy_input_message.py +26 -0
- agentlin_client/types/conversations/easy_input_message_param.py +27 -0
- agentlin_client/types/conversations/file_search_tool_call.py +42 -0
- agentlin_client/types/conversations/file_search_tool_call_param.py +44 -0
- agentlin_client/types/conversations/function_and_custom_tool_call_output.py +15 -0
- agentlin_client/types/conversations/function_and_custom_tool_call_output_param.py +16 -0
- agentlin_client/types/conversations/function_call_item_status.py +7 -0
- agentlin_client/types/conversations/function_tool_call.py +32 -0
- agentlin_client/types/conversations/function_tool_call_output_resource.py +33 -0
- agentlin_client/types/conversations/function_tool_call_param.py +31 -0
- agentlin_client/types/conversations/function_tool_call_resource.py +10 -0
- agentlin_client/types/conversations/image_gen_tool_call.py +22 -0
- agentlin_client/types/conversations/image_gen_tool_call_param.py +22 -0
- agentlin_client/types/conversations/includable.py +14 -0
- agentlin_client/types/conversations/input_content.py +32 -0
- agentlin_client/types/conversations/input_content_param.py +30 -0
- agentlin_client/types/conversations/input_file_content.py +25 -0
- agentlin_client/types/conversations/input_file_content_param.py +25 -0
- agentlin_client/types/conversations/input_image_content.py +28 -0
- agentlin_client/types/conversations/input_image_content_param.py +28 -0
- agentlin_client/types/conversations/input_item.py +209 -0
- agentlin_client/types/conversations/input_item_param.py +203 -0
- agentlin_client/types/conversations/input_message.py +30 -0
- agentlin_client/types/conversations/input_message_param.py +31 -0
- agentlin_client/types/conversations/input_text_content.py +15 -0
- agentlin_client/types/conversations/input_text_content_param.py +15 -0
- agentlin_client/types/conversations/item_create_params.py +24 -0
- agentlin_client/types/conversations/item_list_params.py +50 -0
- agentlin_client/types/conversations/item_retrieve_params.py +22 -0
- agentlin_client/types/conversations/local_shell_tool_call.py +45 -0
- agentlin_client/types/conversations/local_shell_tool_call_output.py +22 -0
- agentlin_client/types/conversations/local_shell_tool_call_output_param.py +22 -0
- agentlin_client/types/conversations/local_shell_tool_call_param.py +47 -0
- agentlin_client/types/conversations/mcp_approval_request.py +24 -0
- agentlin_client/types/conversations/mcp_approval_request_param.py +24 -0
- agentlin_client/types/conversations/mcp_approval_response_resource.py +25 -0
- agentlin_client/types/conversations/mcp_list_tools.py +39 -0
- agentlin_client/types/conversations/mcp_list_tools_param.py +39 -0
- agentlin_client/types/conversations/mcp_tool_call.py +44 -0
- agentlin_client/types/conversations/mcp_tool_call_param.py +44 -0
- agentlin_client/types/conversations/output_message.py +34 -0
- agentlin_client/types/conversations/output_message_param.py +34 -0
- agentlin_client/types/conversations/output_text_content.py +117 -0
- agentlin_client/types/conversations/output_text_content_param.py +115 -0
- agentlin_client/types/conversations/reasoning_item.py +44 -0
- agentlin_client/types/conversations/reasoning_item_param.py +45 -0
- agentlin_client/types/conversations/reasoning_text_content.py +15 -0
- agentlin_client/types/conversations/reasoning_text_content_param.py +15 -0
- agentlin_client/types/conversations/refusal_content.py +15 -0
- agentlin_client/types/conversations/refusal_content_param.py +15 -0
- agentlin_client/types/conversations/web_search_tool_call.py +67 -0
- agentlin_client/types/conversations/web_search_tool_call_param.py +66 -0
- agentlin_client/types/mcp_tool_filter.py +20 -0
- agentlin_client/types/mcp_tool_filter_param.py +22 -0
- agentlin_client/types/model_response_properties_standard.py +87 -0
- agentlin_client/types/response.py +166 -0
- agentlin_client/types/response_create_params.py +497 -0
- agentlin_client/types/response_list_input_items_params.py +34 -0
- agentlin_client/types/response_list_input_items_response.py +70 -0
- agentlin_client/types/response_properties.py +328 -0
- agentlin_client/types/response_retrieve_params.py +42 -0
- agentlin_client/types/response_tool.py +495 -0
- agentlin_client/types/response_tool_param.py +491 -0
- agentlin_client/types/text_response_format_configuration.py +59 -0
- agentlin_client/types/text_response_format_configuration_param.py +54 -0
- agentlin_client-0.1.0.dist-info/METADATA +429 -0
- agentlin_client-0.1.0.dist-info/RECORD +123 -0
- agentlin_client-0.1.0.dist-info/WHEEL +4 -0
- agentlin_client-0.1.0.dist-info/licenses/LICENSE +7 -0
agentlin_client/types/conversations/web_search_tool_call_param.py

@@ -0,0 +1,66 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = ["WebSearchToolCallParam", "Action", "ActionSearch", "ActionSearchSource", "ActionOpenPage", "ActionFind"]
+
+
+class ActionSearchSource(TypedDict, total=False):
+    type: Required[Literal["url"]]
+    """The type of source. Always `url`."""
+
+    url: Required[str]
+    """The URL of the source."""
+
+
+class ActionSearch(TypedDict, total=False):
+    query: Required[str]
+    """The search query."""
+
+    type: Required[Literal["search"]]
+    """The action type."""
+
+    sources: Iterable[ActionSearchSource]
+    """The sources used in the search."""
+
+
+class ActionOpenPage(TypedDict, total=False):
+    type: Required[Literal["open_page"]]
+    """The action type."""
+
+    url: Required[str]
+    """The URL opened by the model."""
+
+
+class ActionFind(TypedDict, total=False):
+    pattern: Required[str]
+    """The pattern or text to search for within the page."""
+
+    type: Required[Literal["find"]]
+    """The action type."""
+
+    url: Required[str]
+    """The URL of the page searched for the pattern."""
+
+
+Action: TypeAlias = Union[ActionSearch, ActionOpenPage, ActionFind]
+
+
+class WebSearchToolCallParam(TypedDict, total=False):
+    id: Required[str]
+    """The unique ID of the web search tool call."""
+
+    action: Required[Action]
+    """
+    An object describing the specific action taken in this web search call. Includes
+    details on how the model used the web (search, open_page, find).
+    """
+
+    status: Required[Literal["in_progress", "searching", "completed", "failed"]]
+    """The status of the web search tool call."""
+
+    type: Required[Literal["web_search_call"]]
+    """The type of the web search tool call. Always `web_search_call`."""
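As a usage sketch (not part of the diff): since these are TypedDicts, request payloads are plain dicts checked only by static type checkers; the module path follows the file listing above, and the field values are illustrative.

from agentlin_client.types.conversations.web_search_tool_call_param import (
    WebSearchToolCallParam,
)

# An ActionSearch variant of the Action union; the "web_search_call" and
# status literals come from the Literal types defined in the diff above.
call: WebSearchToolCallParam = {
    "id": "ws_123",
    "type": "web_search_call",
    "status": "completed",
    "action": {
        "type": "search",
        "query": "readOnlyHint tool annotations",
        "sources": [{"type": "url", "url": "https://modelcontextprotocol.io"}],
    },
}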
agentlin_client/types/mcp_tool_filter.py

@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+
+__all__ = ["McpToolFilter"]
+
+
+class McpToolFilter(BaseModel):
+    read_only: Optional[bool] = None
+    """Indicates whether or not a tool modifies data or is read-only.
+
+    If an MCP server is
+    [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+    it will match this filter.
+    """
+
+    tool_names: Optional[List[str]] = None
+    """List of allowed tool names."""
agentlin_client/types/mcp_tool_filter_param.py

@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from .._types import SequenceNotStr
+
+__all__ = ["McpToolFilterParam"]
+
+
+class McpToolFilterParam(TypedDict, total=False):
+    read_only: bool
+    """Indicates whether or not a tool modifies data or is read-only.
+
+    If an MCP server is
+    [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+    it will match this filter.
+    """
+
+    tool_names: SequenceNotStr[str]
+    """List of allowed tool names."""
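These two files show the generated client's model/param split: McpToolFilter (a BaseModel) types response data, while McpToolFilterParam (a TypedDict) types request input. A minimal sketch, assuming the Stainless BaseModel accepts keyword arguments like an ordinary pydantic model:

from agentlin_client.types.mcp_tool_filter import McpToolFilter
from agentlin_client.types.mcp_tool_filter_param import McpToolFilterParam

# Request side: a plain dict, no runtime validation.
filter_param: McpToolFilterParam = {"read_only": True, "tool_names": ["search", "fetch"]}

# Response side: a validated model whose fields are all optional.
filter_model = McpToolFilter(read_only=True, tool_names=["search", "fetch"])
assert filter_model.read_only is True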
agentlin_client/types/model_response_properties_standard.py

@@ -0,0 +1,87 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["ModelResponsePropertiesStandard"]
+
+
+class ModelResponsePropertiesStandard(BaseModel):
+    metadata: Optional[Dict[str, str]] = None
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+    """
+
+    prompt_cache_key: Optional[str] = None
+    """
+    Used by OpenAI to cache responses for similar requests to optimize your cache
+    hit rates. Replaces the `user` field.
+    [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+    """
+
+    safety_identifier: Optional[str] = None
+    """
+    A stable identifier used to help detect users of your application that may be
+    violating OpenAI's usage policies. The IDs should be a string that uniquely
+    identifies each user. We recommend hashing their username or email address, in
+    order to avoid sending us any identifying information.
+    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+    """
+
+    service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = None
+    """Specifies the processing type used for serving the request.
+
+    - If set to 'auto', then the request will be processed with the service tier
+      configured in the Project settings. Unless otherwise configured, the Project
+      will use 'default'.
+    - If set to 'default', then the request will be processed with the standard
+      pricing and performance for the selected model.
+    - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+      '[priority](https://openai.com/api-priority-processing/)', then the request
+      will be processed with the corresponding service tier.
+    - When not set, the default behavior is 'auto'.
+
+    When the `service_tier` parameter is set, the response body will include the
+    `service_tier` value based on the processing mode actually used to serve the
+    request. This response value may be different from the value set in the
+    parameter.
+    """
+
+    temperature: Optional[float] = None
+    """What sampling temperature to use, between 0 and 2.
+
+    Higher values like 0.8 will make the output more random, while lower values like
+    0.2 will make it more focused and deterministic. We generally recommend altering
+    this or `top_p` but not both.
+    """
+
+    top_logprobs: Optional[int] = None
+    """
+    An integer between 0 and 20 specifying the number of most likely tokens to
+    return at each token position, each with an associated log probability.
+    """
+
+    top_p: Optional[float] = None
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the
+    model considers the results of the tokens with top_p probability mass. So 0.1
+    means only the tokens comprising the top 10% probability mass are considered.
+
+    We generally recommend altering this or `temperature` but not both.
+    """
+
+    user: Optional[str] = None
+    """This field is being replaced by `safety_identifier` and `prompt_cache_key`.
+
+    Use `prompt_cache_key` instead to maintain caching optimizations. A stable
+    identifier for your end-users. Used to boost cache hit rates by better bucketing
+    similar requests and to help OpenAI detect and prevent abuse.
+    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+    """
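A short sketch of these shared sampling and identification fields in use (assuming the generated BaseModel constructs like a standard pydantic model); per the docstrings above, set temperature or top_p but not both:

from agentlin_client.types.model_response_properties_standard import (
    ModelResponsePropertiesStandard,
)

props = ModelResponsePropertiesStandard(
    metadata={"team": "search"},   # up to 16 pairs; 64-char keys, 512-char values
    service_tier="auto",
    temperature=0.2,               # low temperature: focused, deterministic output
    prompt_cache_key="user-1234",  # replaces the deprecated `user` field for caching
)
print(props.top_p)                 # unset optional fields default to None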
agentlin_client/types/response.py

@@ -0,0 +1,166 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from .._utils import PropertyInfo
+from .._models import BaseModel
+from .response_properties import ResponseProperties
+from .conversations.input_item import InputItem
+from .conversations.mcp_tool_call import McpToolCall
+from .conversations.mcp_list_tools import McpListTools
+from .conversations.output_message import OutputMessage
+from .conversations.reasoning_item import ReasoningItem
+from .conversations.custom_tool_call import CustomToolCall
+from .conversations.computer_tool_call import ComputerToolCall
+from .conversations.function_tool_call import FunctionToolCall
+from .conversations.image_gen_tool_call import ImageGenToolCall
+from .conversations.mcp_approval_request import McpApprovalRequest
+from .conversations.web_search_tool_call import WebSearchToolCall
+from .model_response_properties_standard import ModelResponsePropertiesStandard
+from .conversations.file_search_tool_call import FileSearchToolCall
+from .conversations.local_shell_tool_call import LocalShellToolCall
+from .conversations.code_interpreter_tool_call import CodeInterpreterToolCall
+
+__all__ = [
+    "Response",
+    "ResponseError",
+    "ResponseIncompleteDetails",
+    "ResponseOutput",
+    "ResponseUsage",
+    "ResponseUsageInputTokensDetails",
+    "ResponseUsageOutputTokensDetails",
+]
+
+
+class ResponseError(BaseModel):
+    code: Literal[
+        "server_error",
+        "rate_limit_exceeded",
+        "invalid_prompt",
+        "vector_store_timeout",
+        "invalid_image",
+        "invalid_image_format",
+        "invalid_base64_image",
+        "invalid_image_url",
+        "image_too_large",
+        "image_too_small",
+        "image_parse_error",
+        "image_content_policy_violation",
+        "invalid_image_mode",
+        "image_file_too_large",
+        "unsupported_image_media_type",
+        "empty_image_file",
+        "failed_to_download_image",
+        "image_file_not_found",
+    ]
+    """The error code for the response."""
+
+    message: str
+    """A human-readable description of the error."""
+
+
+class ResponseIncompleteDetails(BaseModel):
+    reason: Optional[Literal["max_output_tokens", "content_filter"]] = None
+    """The reason why the response is incomplete."""
+
+
+ResponseOutput: TypeAlias = Annotated[
+    Union[
+        OutputMessage,
+        FileSearchToolCall,
+        FunctionToolCall,
+        WebSearchToolCall,
+        ComputerToolCall,
+        ReasoningItem,
+        ImageGenToolCall,
+        CodeInterpreterToolCall,
+        LocalShellToolCall,
+        McpToolCall,
+        McpListTools,
+        McpApprovalRequest,
+        CustomToolCall,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class ResponseUsageInputTokensDetails(BaseModel):
+    cached_tokens: int
+    """
+    The number of tokens that were retrieved from the cache.
+    [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
+    """
+
+
+class ResponseUsageOutputTokensDetails(BaseModel):
+    reasoning_tokens: int
+    """The number of reasoning tokens."""
+
+
+class ResponseUsage(BaseModel):
+    input_tokens: int
+    """The number of input tokens."""
+
+    input_tokens_details: ResponseUsageInputTokensDetails
+    """A detailed breakdown of the input tokens."""
+
+    output_tokens: int
+    """The number of output tokens."""
+
+    output_tokens_details: ResponseUsageOutputTokensDetails
+    """A detailed breakdown of the output tokens."""
+
+    total_tokens: int
+    """The total number of tokens used."""
+
+
+class Response(ModelResponsePropertiesStandard, ResponseProperties):
+    id: str
+    """Unique identifier for this Response."""
+
+    created_at: float
+    """Unix timestamp (in seconds) of when this Response was created."""
+
+    error: Optional[ResponseError] = None
+    """An error object returned when the model fails to generate a Response."""
+
+    incomplete_details: Optional[ResponseIncompleteDetails] = None
+    """Details about why the response is incomplete."""
+
+    instructions: Union[str, List[InputItem], None] = None
+    """A system (or developer) message inserted into the model's context.
+
+    When using along with `previous_response_id`, the instructions from a previous
+    response will not be carried over to the next response. This makes it simple to
+    swap out system (or developer) messages in new responses.
+    """
+
+    object: Literal["response"]
+    """The object type of this resource - always set to `response`."""
+
+    output: List[ResponseOutput]
+    """An array of content items generated by the model.
+
+    - The length and order of items in the `output` array is dependent on the
+      model's response.
+    - Rather than accessing the first item in the `output` array and assuming it's
+      an `assistant` message with the content generated by the model, you might
+      consider using the `output_text` property where supported in SDKs.
+    """
+
+    parallel_tool_calls: bool
+    """Whether to allow the model to run tool calls in parallel."""
+
+    status: Optional[Literal["completed", "failed", "in_progress", "cancelled", "queued", "incomplete"]] = None
+    """The status of the response generation.
+
+    One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or
+    `incomplete`.
+    """
+
+    usage: Optional[ResponseUsage] = None
+    """
+    Represents token usage details including input tokens, output tokens, a
+    breakdown of output tokens, and the total tokens used.
+    """
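Since ResponseOutput is discriminated on `type` via PropertyInfo(discriminator="type"), consumers branch on each output item's `type` rather than assuming the first item is an assistant message. A hedged sketch: `resp` stands in for a Response returned by the API, and the "message" discriminator value is assumed from the OutputMessage name (only "web_search_call" is confirmed in this diff).

def summarize(resp) -> None:
    # Walk the discriminated union rather than indexing output[0].
    for item in resp.output:
        if item.type == "web_search_call":  # WebSearchToolCall (confirmed above)
            print("searched:", item.action)
        elif item.type == "message":        # assumed OutputMessage discriminator
            print("assistant message:", item.id)
        else:
            print("other output item:", item.type)
    if resp.usage is not None:
        print("total tokens:", resp.usage.total_tokens)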