perplexityai 0.12.1__py3-none-any.whl → 0.14.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



perplexity/_client.py CHANGED
@@ -105,6 +105,8 @@ class Perplexity(SyncAPIClient):
             _strict_response_validation=_strict_response_validation,
         )
 
+        self._default_stream_cls = Stream
+
         self.chat = chat.ChatResource(self)
         self.async_ = async_.AsyncResource(self)
         self.search = search.SearchResource(self)
@@ -277,6 +279,8 @@ class AsyncPerplexity(AsyncAPIClient):
             _strict_response_validation=_strict_response_validation,
         )
 
+        self._default_stream_cls = AsyncStream
+
         self.chat = chat.AsyncChatResource(self)
         self.async_ = async_.AsyncAsyncResource(self)
         self.search = search.AsyncSearchResource(self)
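
The new _default_stream_cls assignment pins Stream as the wrapper class for streaming responses on the sync client (and AsyncStream on the async client). A minimal usage sketch follows; the chat.completions.create call, the stream=True flag, the "sonar" model name, and the PERPLEXITY_API_KEY environment variable are assumptions for illustration and are not shown in this diff.

from perplexity import Perplexity

client = Perplexity()  # assumed to read PERPLEXITY_API_KEY from the environment

# Hypothetical streaming request; only the Stream wrapper itself appears in this diff.
stream = client.chat.completions.create(
    model="sonar",  # example model name, not taken from this diff
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,  # the returned iterator is built from _default_stream_cls (Stream)
)

for chunk in stream:
    print(chunk)  # each chunk is one parsed SSE data event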
perplexity/_streaming.py CHANGED
@@ -55,7 +55,26 @@ class Stream(Generic[_T]):
         iterator = self._iter_events()
 
         for sse in iterator:
-            yield process_data(data=sse.json(), cast_to=cast_to, response=response)
+            if sse.data.startswith("[DONE]"):
+                break
+
+            if sse.event == "error":
+                body = sse.data
+
+                try:
+                    body = sse.json()
+                    err_msg = f"{body}"
+                except Exception:
+                    err_msg = sse.data or f"Error code: {response.status_code}"
+
+                raise self._client._make_status_error(
+                    err_msg,
+                    body=body,
+                    response=self.response,
+                )
+
+            if sse.event is None:
+                yield process_data(data=sse.json(), cast_to=cast_to, response=response)
 
         # Ensure the entire stream is consumed
         for _sse in iterator:
@@ -119,7 +138,26 @@ class AsyncStream(Generic[_T]):
         iterator = self._iter_events()
 
         async for sse in iterator:
-            yield process_data(data=sse.json(), cast_to=cast_to, response=response)
+            if sse.data.startswith("[DONE]"):
+                break
+
+            if sse.event == "error":
+                body = sse.data
+
+                try:
+                    body = sse.json()
+                    err_msg = f"{body}"
+                except Exception:
+                    err_msg = sse.data or f"Error code: {response.status_code}"
+
+                raise self._client._make_status_error(
+                    err_msg,
+                    body=body,
+                    response=self.response,
+                )
+
+            if sse.event is None:
+                yield process_data(data=sse.json(), cast_to=cast_to, response=response)
 
         # Ensure the entire stream is consumed
         async for _sse in iterator:
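
Both stream classes now apply the same three rules to incoming server-sent events: stop at a "[DONE]" sentinel, turn an explicit error event into a raised status error, and yield only unnamed data events. The standalone sketch below mirrors that dispatch; FakeSSE and the plain RuntimeError are simplified stand-ins, not the SDK's real ServerSentEvent or error types.

import json
from dataclasses import dataclass
from typing import Iterator, Optional

@dataclass
class FakeSSE:
    data: str
    event: Optional[str] = None

    def json(self) -> object:
        return json.loads(self.data)

def iter_chunks(events: Iterator[FakeSSE]) -> Iterator[object]:
    for sse in events:
        if sse.data.startswith("[DONE]"):  # terminator sentinel: stop iterating cleanly
            break
        if sse.event == "error":  # error events raise instead of being yielded
            try:
                body = sse.json()
            except Exception:
                body = sse.data
            raise RuntimeError(f"stream error: {body}")
        if sse.event is None:  # only unnamed events carry completion chunks
            yield sse.json()

chunks = list(iter_chunks(iter([
    FakeSSE('{"delta": "Hel"}'),
    FakeSSE('{"delta": "lo"}'),
    FakeSSE("[DONE]"),
])))
print(chunks)  # [{'delta': 'Hel'}, {'delta': 'lo'}]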
perplexity/_version.py CHANGED
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "perplexity"
-__version__ = "0.12.1"  # x-release-please-version
+__version__ = "0.14.0"  # x-release-please-version
@@ -58,10 +58,7 @@ class CompletionsResource(SyncAPIResource):
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionCreateResponse:
         """
-        FastAPI wrapper around async chat completions
-
-        This endpoint creates an asynchronous chat completion job and returns a job ID
-        that can be used to poll for results.
+        Submit an asynchronous chat completion request.
 
         Args:
           extra_headers: Send extra headers
@@ -97,7 +94,7 @@ class CompletionsResource(SyncAPIResource):
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionListResponse:
-        """list all async chat completion requests for a given user."""
+        """Retrieve a list of all asynchronous chat completion requests for a given user."""
         return self._get(
             "/async/chat/completions",
             options=make_request_options(
@@ -113,6 +110,7 @@ class CompletionsResource(SyncAPIResource):
         local_mode: bool | Omit = omit,
         x_client_env: str | Omit = omit,
         x_client_name: str | Omit = omit,
+        x_created_at_epoch_seconds: str | Omit = omit,
         x_request_time: str | Omit = omit,
         x_usage_tier: str | Omit = omit,
         x_user_id: str | Omit = omit,
@@ -124,7 +122,7 @@ class CompletionsResource(SyncAPIResource):
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionGetResponse:
         """
-        get the response for a given async chat completion request.
+        Retrieve the response for a given asynchronous chat completion request.
 
         Args:
           extra_headers: Send extra headers
@@ -142,6 +140,7 @@ class CompletionsResource(SyncAPIResource):
                 {
                     "x-client-env": x_client_env,
                     "x-client-name": x_client_name,
+                    "x-created-at-epoch-seconds": x_created_at_epoch_seconds,
                     "x-request-time": x_request_time,
                     "x-usage-tier": x_usage_tier,
                     "x-user-id": x_user_id,
@@ -195,10 +194,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionCreateResponse:
         """
-        FastAPI wrapper around async chat completions
-
-        This endpoint creates an asynchronous chat completion job and returns a job ID
-        that can be used to poll for results.
+        Submit an asynchronous chat completion request.
 
         Args:
           extra_headers: Send extra headers
@@ -234,7 +230,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionListResponse:
-        """list all async chat completion requests for a given user."""
+        """Retrieve a list of all asynchronous chat completion requests for a given user."""
         return await self._get(
             "/async/chat/completions",
             options=make_request_options(
@@ -250,6 +246,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
         local_mode: bool | Omit = omit,
         x_client_env: str | Omit = omit,
         x_client_name: str | Omit = omit,
+        x_created_at_epoch_seconds: str | Omit = omit,
         x_request_time: str | Omit = omit,
         x_usage_tier: str | Omit = omit,
         x_user_id: str | Omit = omit,
@@ -261,7 +258,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> CompletionGetResponse:
         """
-        get the response for a given async chat completion request.
+        Retrieve the response for a given asynchronous chat completion request.
 
         Args:
           extra_headers: Send extra headers
@@ -279,6 +276,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
                 {
                     "x-client-env": x_client_env,
                     "x-client-name": x_client_name,
+                    "x-created-at-epoch-seconds": x_created_at_epoch_seconds,
                     "x-request-time": x_request_time,
                     "x-usage-tier": x_usage_tier,
                     "x-user-id": x_user_id,