perplexityai 0.22.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- perplexity/__init__.py +102 -0
- perplexity/_base_client.py +2001 -0
- perplexity/_client.py +529 -0
- perplexity/_compat.py +219 -0
- perplexity/_constants.py +14 -0
- perplexity/_exceptions.py +108 -0
- perplexity/_files.py +123 -0
- perplexity/_models.py +857 -0
- perplexity/_qs.py +150 -0
- perplexity/_resource.py +43 -0
- perplexity/_response.py +832 -0
- perplexity/_streaming.py +371 -0
- perplexity/_types.py +261 -0
- perplexity/_utils/__init__.py +64 -0
- perplexity/_utils/_compat.py +45 -0
- perplexity/_utils/_datetime_parse.py +136 -0
- perplexity/_utils/_logs.py +25 -0
- perplexity/_utils/_proxy.py +65 -0
- perplexity/_utils/_reflection.py +42 -0
- perplexity/_utils/_resources_proxy.py +24 -0
- perplexity/_utils/_streams.py +12 -0
- perplexity/_utils/_sync.py +58 -0
- perplexity/_utils/_transform.py +457 -0
- perplexity/_utils/_typing.py +156 -0
- perplexity/_utils/_utils.py +421 -0
- perplexity/_version.py +4 -0
- perplexity/lib/.keep +4 -0
- perplexity/py.typed +0 -0
- perplexity/resources/__init__.py +47 -0
- perplexity/resources/async_/__init__.py +33 -0
- perplexity/resources/async_/async_.py +102 -0
- perplexity/resources/async_/chat/__init__.py +33 -0
- perplexity/resources/async_/chat/chat.py +102 -0
- perplexity/resources/async_/chat/completions.py +359 -0
- perplexity/resources/chat/__init__.py +33 -0
- perplexity/resources/chat/chat.py +102 -0
- perplexity/resources/chat/completions.py +900 -0
- perplexity/resources/search.py +228 -0
- perplexity/types/__init__.py +14 -0
- perplexity/types/async_/__init__.py +3 -0
- perplexity/types/async_/chat/__init__.py +9 -0
- perplexity/types/async_/chat/completion_create_params.py +242 -0
- perplexity/types/async_/chat/completion_create_response.py +30 -0
- perplexity/types/async_/chat/completion_get_params.py +25 -0
- perplexity/types/async_/chat/completion_get_response.py +30 -0
- perplexity/types/async_/chat/completion_list_response.py +31 -0
- perplexity/types/chat/__init__.py +5 -0
- perplexity/types/chat/completion_create_params.py +244 -0
- perplexity/types/search_create_params.py +40 -0
- perplexity/types/search_create_response.py +27 -0
- perplexity/types/shared/__init__.py +7 -0
- perplexity/types/shared/api_public_search_result.py +22 -0
- perplexity/types/shared/chat_message_input.py +176 -0
- perplexity/types/shared/chat_message_output.py +176 -0
- perplexity/types/shared/choice.py +19 -0
- perplexity/types/shared/usage_info.py +41 -0
- perplexity/types/shared_params/__init__.py +4 -0
- perplexity/types/shared_params/api_public_search_result.py +22 -0
- perplexity/types/shared_params/chat_message_input.py +178 -0
- perplexity/types/stream_chunk.py +33 -0
- perplexityai-0.22.3.dist-info/METADATA +548 -0
- perplexityai-0.22.3.dist-info/RECORD +64 -0
- perplexityai-0.22.3.dist-info/WHEEL +4 -0
- perplexityai-0.22.3.dist-info/licenses/LICENSE +201 -0

perplexity/resources/search.py
@@ -0,0 +1,228 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Union, Optional
from typing_extensions import Literal

import httpx

from ..types import search_create_params
from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
    to_raw_response_wrapper,
    to_streamed_response_wrapper,
    async_to_raw_response_wrapper,
    async_to_streamed_response_wrapper,
)
from .._base_client import make_request_options
from ..types.search_create_response import SearchCreateResponse

__all__ = ["SearchResource", "AsyncSearchResource"]


class SearchResource(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> SearchResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/perplexityai/perplexity-py#accessing-raw-response-data-eg-headers
        """
        return SearchResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> SearchResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/perplexityai/perplexity-py#with_streaming_response
        """
        return SearchResourceWithStreamingResponse(self)

    def create(
        self,
        *,
        query: Union[str, SequenceNotStr[str]],
        country: Optional[str] | Omit = omit,
        display_server_time: bool | Omit = omit,
        last_updated_after_filter: Optional[str] | Omit = omit,
        last_updated_before_filter: Optional[str] | Omit = omit,
        max_results: int | Omit = omit,
        max_tokens: int | Omit = omit,
        max_tokens_per_page: int | Omit = omit,
        search_after_date_filter: Optional[str] | Omit = omit,
        search_before_date_filter: Optional[str] | Omit = omit,
        search_domain_filter: Optional[SequenceNotStr[str]] | Omit = omit,
        search_language_filter: Optional[SequenceNotStr[str]] | Omit = omit,
        search_mode: Optional[Literal["web", "academic", "sec"]] | Omit = omit,
        search_recency_filter: Optional[Literal["hour", "day", "week", "month", "year"]] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SearchCreateResponse:
        """
        Search the web and retrieve relevant web page contents.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._post(
            "/search",
            body=maybe_transform(
                {
                    "query": query,
                    "country": country,
                    "display_server_time": display_server_time,
                    "last_updated_after_filter": last_updated_after_filter,
                    "last_updated_before_filter": last_updated_before_filter,
                    "max_results": max_results,
                    "max_tokens": max_tokens,
                    "max_tokens_per_page": max_tokens_per_page,
                    "search_after_date_filter": search_after_date_filter,
                    "search_before_date_filter": search_before_date_filter,
                    "search_domain_filter": search_domain_filter,
                    "search_language_filter": search_language_filter,
                    "search_mode": search_mode,
                    "search_recency_filter": search_recency_filter,
                },
                search_create_params.SearchCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SearchCreateResponse,
        )


class AsyncSearchResource(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncSearchResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/perplexityai/perplexity-py#accessing-raw-response-data-eg-headers
        """
        return AsyncSearchResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncSearchResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/perplexityai/perplexity-py#with_streaming_response
        """
        return AsyncSearchResourceWithStreamingResponse(self)

    async def create(
        self,
        *,
        query: Union[str, SequenceNotStr[str]],
        country: Optional[str] | Omit = omit,
        display_server_time: bool | Omit = omit,
        last_updated_after_filter: Optional[str] | Omit = omit,
        last_updated_before_filter: Optional[str] | Omit = omit,
        max_results: int | Omit = omit,
        max_tokens: int | Omit = omit,
        max_tokens_per_page: int | Omit = omit,
        search_after_date_filter: Optional[str] | Omit = omit,
        search_before_date_filter: Optional[str] | Omit = omit,
        search_domain_filter: Optional[SequenceNotStr[str]] | Omit = omit,
        search_language_filter: Optional[SequenceNotStr[str]] | Omit = omit,
        search_mode: Optional[Literal["web", "academic", "sec"]] | Omit = omit,
        search_recency_filter: Optional[Literal["hour", "day", "week", "month", "year"]] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SearchCreateResponse:
        """
        Search the web and retrieve relevant web page contents.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._post(
            "/search",
            body=await async_maybe_transform(
                {
                    "query": query,
                    "country": country,
                    "display_server_time": display_server_time,
                    "last_updated_after_filter": last_updated_after_filter,
                    "last_updated_before_filter": last_updated_before_filter,
                    "max_results": max_results,
                    "max_tokens": max_tokens,
                    "max_tokens_per_page": max_tokens_per_page,
                    "search_after_date_filter": search_after_date_filter,
                    "search_before_date_filter": search_before_date_filter,
                    "search_domain_filter": search_domain_filter,
                    "search_language_filter": search_language_filter,
                    "search_mode": search_mode,
                    "search_recency_filter": search_recency_filter,
                },
                search_create_params.SearchCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SearchCreateResponse,
        )


class SearchResourceWithRawResponse:
    def __init__(self, search: SearchResource) -> None:
        self._search = search

        self.create = to_raw_response_wrapper(
            search.create,
        )


class AsyncSearchResourceWithRawResponse:
    def __init__(self, search: AsyncSearchResource) -> None:
        self._search = search

        self.create = async_to_raw_response_wrapper(
            search.create,
        )


class SearchResourceWithStreamingResponse:
    def __init__(self, search: SearchResource) -> None:
        self._search = search

        self.create = to_streamed_response_wrapper(
            search.create,
        )


class AsyncSearchResourceWithStreamingResponse:
    def __init__(self, search: AsyncSearchResource) -> None:
        self._search = search

        self.create = async_to_streamed_response_wrapper(
            search.create,
        )
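For orientation, a minimal usage sketch of the Search resource above. It assumes the SDK's top-level client class is `Perplexity` (defined in `perplexity/_client.py`, not shown in this hunk), that the API key is read from the `PERPLEXITY_API_KEY` environment variable, and that the resource is mounted at `client.search`.

```python
# Minimal sketch, not part of the package: calling SearchResource.create.
# Client class name, env-var handling, and the `client.search` attribute are assumptions.
from perplexity import Perplexity

client = Perplexity()  # assumed to pick up PERPLEXITY_API_KEY from the environment

result = client.search.create(
    query="recent advances in battery chemistry",  # str or list of str
    max_results=5,
    search_mode="web",
    search_recency_filter="month",
)
print(result)  # a SearchCreateResponse model
```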
perplexity/types/__init__.py
@@ -0,0 +1,14 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from .shared import (
    Choice as Choice,
    UsageInfo as UsageInfo,
    ChatMessageInput as ChatMessageInput,
    ChatMessageOutput as ChatMessageOutput,
    APIPublicSearchResult as APIPublicSearchResult,
)
from .stream_chunk import StreamChunk as StreamChunk
from .search_create_params import SearchCreateParams as SearchCreateParams
from .search_create_response import SearchCreateResponse as SearchCreateResponse
perplexity/types/async_/chat/__init__.py
@@ -0,0 +1,9 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from .completion_get_params import CompletionGetParams as CompletionGetParams
from .completion_get_response import CompletionGetResponse as CompletionGetResponse
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
from .completion_list_response import CompletionListResponse as CompletionListResponse
from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse
perplexity/types/async_/chat/completion_create_params.py
@@ -0,0 +1,242 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Dict, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict

from ...._types import SequenceNotStr
from ...shared_params.chat_message_input import ChatMessageInput

__all__ = [
    "CompletionCreateParams",
    "Request",
    "RequestResponseFormat",
    "RequestResponseFormatResponseFormatText",
    "RequestResponseFormatResponseFormatJsonSchema",
    "RequestResponseFormatResponseFormatJsonSchemaJsonSchema",
    "RequestResponseFormatResponseFormatRegex",
    "RequestResponseFormatResponseFormatRegexRegex",
    "RequestTool",
    "RequestToolFunction",
    "RequestToolFunctionParameters",
    "RequestWebSearchOptions",
    "RequestWebSearchOptionsUserLocation",
]


class CompletionCreateParams(TypedDict, total=False):
    request: Required[Request]

    idempotency_key: Optional[str]


class RequestResponseFormatResponseFormatText(TypedDict, total=False):
    type: Required[Literal["text"]]


class RequestResponseFormatResponseFormatJsonSchemaJsonSchema(TypedDict, total=False):
    schema: Required[Dict[str, object]]

    description: Optional[str]

    name: Optional[str]

    strict: Optional[bool]


class RequestResponseFormatResponseFormatJsonSchema(TypedDict, total=False):
    json_schema: Required[RequestResponseFormatResponseFormatJsonSchemaJsonSchema]

    type: Required[Literal["json_schema"]]


class RequestResponseFormatResponseFormatRegexRegex(TypedDict, total=False):
    regex: Required[str]

    description: Optional[str]

    name: Optional[str]

    strict: Optional[bool]


class RequestResponseFormatResponseFormatRegex(TypedDict, total=False):
    regex: Required[RequestResponseFormatResponseFormatRegexRegex]

    type: Required[Literal["regex"]]


RequestResponseFormat: TypeAlias = Union[
    RequestResponseFormatResponseFormatText,
    RequestResponseFormatResponseFormatJsonSchema,
    RequestResponseFormatResponseFormatRegex,
]


class RequestToolFunctionParameters(TypedDict, total=False):
    properties: Required[Dict[str, object]]

    type: Required[str]

    additional_properties: Optional[bool]

    required: Optional[SequenceNotStr[str]]


class RequestToolFunction(TypedDict, total=False):
    description: Required[str]

    name: Required[str]

    parameters: Required[RequestToolFunctionParameters]

    strict: Optional[bool]


class RequestTool(TypedDict, total=False):
    function: Required[RequestToolFunction]

    type: Required[Literal["function"]]


class RequestWebSearchOptionsUserLocation(TypedDict, total=False):
    city: Optional[str]

    country: Optional[str]

    latitude: Optional[float]

    longitude: Optional[float]

    region: Optional[str]


class RequestWebSearchOptions(TypedDict, total=False):
    image_results_enhanced_relevance: bool

    search_context_size: Literal["low", "medium", "high"]

    search_type: Optional[Literal["fast", "pro", "auto"]]

    user_location: Optional[RequestWebSearchOptionsUserLocation]


class Request(TypedDict, total=False):
    messages: Required[Iterable[ChatMessageInput]]

    model: Required[str]

    _debug_pro_search: bool

    _force_new_agent: Optional[bool]

    _inputs: Optional[Iterable[int]]

    _prompt_token_length: Optional[int]

    best_of: Optional[int]

    country: Optional[str]

    cum_logprobs: Optional[bool]

    disable_search: Optional[bool]

    diverse_first_token: Optional[bool]

    enable_search_classifier: Optional[bool]

    file_workspace_id: Optional[str]

    frequency_penalty: Optional[float]

    has_image_url: bool

    image_domain_filter: Optional[SequenceNotStr[str]]

    image_format_filter: Optional[SequenceNotStr[str]]

    language_preference: Optional[str]

    last_updated_after_filter: Optional[str]

    last_updated_before_filter: Optional[str]

    latitude: Optional[float]

    logprobs: Optional[bool]

    longitude: Optional[float]

    max_tokens: Optional[int]

    n: Optional[int]

    num_images: int

    num_search_results: int

    parallel_tool_calls: Optional[bool]

    presence_penalty: Optional[float]

    ranking_model: Optional[str]

    reasoning_effort: Optional[Literal["minimal", "low", "medium", "high"]]

    response_format: Optional[RequestResponseFormat]

    response_metadata: Optional[Dict[str, object]]

    return_images: Optional[bool]

    return_related_questions: Optional[bool]

    safe_search: Optional[bool]

    search_after_date_filter: Optional[str]

    search_before_date_filter: Optional[str]

    search_domain_filter: Optional[SequenceNotStr[str]]

    search_internal_properties: Optional[Dict[str, object]]

    search_language_filter: Optional[SequenceNotStr[str]]

    search_mode: Optional[Literal["web", "academic", "sec"]]

    search_recency_filter: Optional[Literal["hour", "day", "week", "month", "year"]]

    search_tenant: Optional[str]

    stop: Union[str, SequenceNotStr[str], None]

    stream: Optional[bool]

    stream_mode: Literal["full", "concise"]

    temperature: Optional[float]

    thread_id: Optional[str]

    tool_choice: Optional[Literal["none", "auto", "required"]]

    tools: Optional[Iterable[RequestTool]]

    top_k: Optional[int]

    top_logprobs: Optional[int]

    top_p: Optional[float]

    updated_after_timestamp: Optional[int]

    updated_before_timestamp: Optional[int]

    use_threads: Optional[bool]

    user_original_query: Optional[str]

    web_search_options: RequestWebSearchOptions
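As an illustration, a payload shaped after the `CompletionCreateParams`/`Request` TypedDicts above. The model name and the role/content message shape are assumptions; `ChatMessageInput` is defined in `perplexity/types/shared_params/chat_message_input.py`, which is not shown in this hunk.

```python
# Illustrative sketch only: an async chat-completion request payload.
# "sonar" and the {"role", "content"} message shape are assumed, not confirmed by this hunk.
from perplexity.types.async_.chat.completion_create_params import CompletionCreateParams

params: CompletionCreateParams = {
    "request": {
        "model": "sonar",
        "messages": [{"role": "user", "content": "Summarize this week's AI news."}],
        "max_tokens": 512,
        "response_format": {"type": "text"},
        "web_search_options": {"search_context_size": "medium"},
    },
    "idempotency_key": "req-example-001",  # hypothetical key for illustration
}
```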
perplexity/types/async_/chat/completion_create_response.py
@@ -0,0 +1,30 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from typing_extensions import Literal

from ...._models import BaseModel
from ...stream_chunk import StreamChunk

__all__ = ["CompletionCreateResponse"]


class CompletionCreateResponse(BaseModel):
    id: str

    created_at: int

    model: str

    status: Literal["CREATED", "IN_PROGRESS", "COMPLETED", "FAILED"]
    """Status enum for async processing."""

    completed_at: Optional[int] = None

    error_message: Optional[str] = None

    failed_at: Optional[int] = None

    response: Optional[StreamChunk] = None

    started_at: Optional[int] = None
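A small sketch of inspecting this response model. The resource method that actually returns it lives in `perplexity/resources/async_/chat/completions.py` and is not shown here.

```python
# Sketch: deciding whether an async chat-completion request has reached a terminal state.
from perplexity.types.async_.chat.completion_create_response import CompletionCreateResponse


def is_done(resp: CompletionCreateResponse) -> bool:
    # A request is terminal once it has either completed or failed;
    # `response` carries the final StreamChunk when status == "COMPLETED".
    return resp.status in ("COMPLETED", "FAILED")
```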
@@ -0,0 +1,25 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing_extensions import Annotated, TypedDict
|
|
6
|
+
|
|
7
|
+
from ...._utils import PropertyInfo
|
|
8
|
+
|
|
9
|
+
__all__ = ["CompletionGetParams"]
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class CompletionGetParams(TypedDict, total=False):
|
|
13
|
+
local_mode: bool
|
|
14
|
+
|
|
15
|
+
x_client_env: Annotated[str, PropertyInfo(alias="x-client-env")]
|
|
16
|
+
|
|
17
|
+
x_client_name: Annotated[str, PropertyInfo(alias="x-client-name")]
|
|
18
|
+
|
|
19
|
+
x_created_at_epoch_seconds: Annotated[str, PropertyInfo(alias="x-created-at-epoch-seconds")]
|
|
20
|
+
|
|
21
|
+
x_request_time: Annotated[str, PropertyInfo(alias="x-request-time")]
|
|
22
|
+
|
|
23
|
+
x_usage_tier: Annotated[str, PropertyInfo(alias="x-usage-tier")]
|
|
24
|
+
|
|
25
|
+
x_user_id: Annotated[str, PropertyInfo(alias="x-user-id")]
|
|
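For clarity, a sketch of building this params dict: the snake_case keys are mapped to their hyphenated wire names by the `PropertyInfo` aliases shown above (e.g. `x_client_name` becomes `x-client-name`). The values used here are purely illustrative.

```python
# Illustrative only: parameters for the async completion "get" endpoint.
from perplexity.types.async_.chat.completion_get_params import CompletionGetParams

params: CompletionGetParams = {
    "local_mode": False,
    "x_client_name": "example-app",  # hypothetical value
    "x_client_env": "dev",           # hypothetical value
}
```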
perplexity/types/async_/chat/completion_get_response.py
@@ -0,0 +1,30 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from typing_extensions import Literal

from ...._models import BaseModel
from ...stream_chunk import StreamChunk

__all__ = ["CompletionGetResponse"]


class CompletionGetResponse(BaseModel):
    id: str

    created_at: int

    model: str

    status: Literal["CREATED", "IN_PROGRESS", "COMPLETED", "FAILED"]
    """Status enum for async processing."""

    completed_at: Optional[int] = None

    error_message: Optional[str] = None

    failed_at: Optional[int] = None

    response: Optional[StreamChunk] = None

    started_at: Optional[int] = None
perplexity/types/async_/chat/completion_list_response.py
@@ -0,0 +1,31 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional
from typing_extensions import Literal

from ...._models import BaseModel

__all__ = ["CompletionListResponse", "Request"]


class Request(BaseModel):
    id: str

    created_at: int

    model: str

    status: Literal["CREATED", "IN_PROGRESS", "COMPLETED", "FAILED"]
    """Status enum for async processing."""

    completed_at: Optional[int] = None

    failed_at: Optional[int] = None

    started_at: Optional[int] = None


class CompletionListResponse(BaseModel):
    requests: List[Request]

    next_token: Optional[str] = None
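Finally, a sketch of walking one page of this list response. `next_token`, when present, is the cursor for the next page; how it is passed back to the list endpoint is not shown in this hunk and is left as an assumption.

```python
# Sketch: summarizing a page of async completion requests.
from perplexity.types.async_.chat.completion_list_response import CompletionListResponse


def summarize(page: CompletionListResponse) -> None:
    for req in page.requests:
        print(f"{req.id}: {req.status} (model={req.model})")
    if page.next_token:
        # Assumed: feed this token into the next list call to continue paging.
        print("more results available")
```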