relaxai 0.1.0__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of relaxai might be problematic. Click here for more details.

Files changed (31)
  1. relaxai/_base_client.py +4 -1
  2. relaxai/_client.py +77 -40
  3. relaxai/_files.py +4 -4
  4. relaxai/_models.py +1 -1
  5. relaxai/_version.py +1 -1
  6. relaxai/resources/__init__.py +0 -14
  7. relaxai/resources/chat.py +46 -21
  8. relaxai/resources/embeddings.py +18 -18
  9. relaxai/resources/models.py +57 -57
  10. relaxai/types/__init__.py +11 -6
  11. relaxai/types/chat_completion_message.py +29 -3
  12. relaxai/types/chat_completion_message_param.py +29 -4
  13. relaxai/types/{chat_create_completion_response.py → chat_completion_response.py} +4 -4
  14. relaxai/types/chat_create_completion_params.py +62 -10
  15. relaxai/types/{embedding_create_params.py → embedding_create_embedding_params.py} +2 -2
  16. relaxai/types/{embedding_create_response.py → embedding_response.py} +4 -4
  17. relaxai/types/function_definition_param.py +6 -2
  18. relaxai/types/{health_check_response.py → health_response.py} +2 -2
  19. relaxai/types/{model_list_response.py → model_list.py} +2 -2
  20. relaxai/types/shared/__init__.py +5 -0
  21. relaxai/types/shared/openai_completion_tokens_details.py +15 -0
  22. relaxai/types/shared/openai_prompt_tokens_details.py +11 -0
  23. relaxai/types/shared/openai_usage.py +19 -0
  24. relaxai/types/stream_options_param.py +11 -0
  25. {relaxai-0.1.0.dist-info → relaxai-0.2.1.dist-info}/METADATA +44 -39
  26. relaxai-0.2.1.dist-info/RECORD +53 -0
  27. relaxai/resources/health.py +0 -134
  28. relaxai/types/usage.py +0 -33
  29. relaxai-0.1.0.dist-info/RECORD +0 -50
  30. {relaxai-0.1.0.dist-info → relaxai-0.2.1.dist-info}/WHEEL +0 -0
  31. {relaxai-0.1.0.dist-info → relaxai-0.2.1.dist-info}/licenses/LICENSE +0 -0
relaxai/_base_client.py CHANGED
@@ -532,7 +532,10 @@ class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]):
532
532
  is_body_allowed = options.method.lower() != "get"
533
533
 
534
534
  if is_body_allowed:
535
- kwargs["json"] = json_data if is_given(json_data) else None
535
+ if isinstance(json_data, bytes):
536
+ kwargs["content"] = json_data
537
+ else:
538
+ kwargs["json"] = json_data if is_given(json_data) else None
536
539
  kwargs["files"] = files
537
540
  else:
538
541
  headers.pop("Content-Type", None)
relaxai/_client.py CHANGED
@@ -12,7 +12,9 @@ from . import _exceptions
12
12
  from ._qs import Querystring
13
13
  from ._types import (
14
14
  NOT_GIVEN,
15
+ Body,
15
16
  Omit,
17
+ Query,
16
18
  Headers,
17
19
  Timeout,
18
20
  NotGiven,
@@ -22,13 +24,20 @@ from ._types import (
22
24
  )
23
25
  from ._utils import is_given, get_async_library
24
26
  from ._version import __version__
25
- from .resources import chat, health, models, embeddings
27
+ from ._response import (
28
+ to_raw_response_wrapper,
29
+ to_streamed_response_wrapper,
30
+ async_to_raw_response_wrapper,
31
+ async_to_streamed_response_wrapper,
32
+ )
33
+ from .resources import chat, models, embeddings
26
34
  from ._streaming import Stream as Stream, AsyncStream as AsyncStream
27
- from ._exceptions import APIStatusError
35
+ from ._exceptions import RelaxaiError, APIStatusError
28
36
  from ._base_client import (
29
37
  DEFAULT_MAX_RETRIES,
30
38
  SyncAPIClient,
31
39
  AsyncAPIClient,
40
+ make_request_options,
32
41
  )
33
42
 
34
43
  __all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "Relaxai", "AsyncRelaxai", "Client", "AsyncClient"]
@@ -37,13 +46,12 @@ __all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "Relaxai",
37
46
  class Relaxai(SyncAPIClient):
38
47
  chat: chat.ChatResource
39
48
  embeddings: embeddings.EmbeddingsResource
40
- health: health.HealthResource
41
49
  models: models.ModelsResource
42
50
  with_raw_response: RelaxaiWithRawResponse
43
51
  with_streaming_response: RelaxaiWithStreamedResponse
44
52
 
45
53
  # client options
46
- api_key: str | None
54
+ api_key: str
47
55
 
48
56
  def __init__(
49
57
  self,
@@ -74,12 +82,16 @@ class Relaxai(SyncAPIClient):
74
82
  """
75
83
  if api_key is None:
76
84
  api_key = os.environ.get("RELAXAI_API_KEY")
85
+ if api_key is None:
86
+ raise RelaxaiError(
87
+ "The api_key client option must be set either by passing api_key to the client or by setting the RELAXAI_API_KEY environment variable"
88
+ )
77
89
  self.api_key = api_key
78
90
 
79
91
  if base_url is None:
80
92
  base_url = os.environ.get("RELAXAI_BASE_URL")
81
93
  if base_url is None:
82
- base_url = f"http://127.0.0.1"
94
+ base_url = f"https://api.relax.ai"
83
95
 
84
96
  super().__init__(
85
97
  version=__version__,
@@ -94,7 +106,6 @@ class Relaxai(SyncAPIClient):
94
106
 
95
107
  self.chat = chat.ChatResource(self)
96
108
  self.embeddings = embeddings.EmbeddingsResource(self)
97
- self.health = health.HealthResource(self)
98
109
  self.models = models.ModelsResource(self)
99
110
  self.with_raw_response = RelaxaiWithRawResponse(self)
100
111
  self.with_streaming_response = RelaxaiWithStreamedResponse(self)
@@ -108,8 +119,6 @@ class Relaxai(SyncAPIClient):
108
119
  @override
109
120
  def auth_headers(self) -> dict[str, str]:
110
121
  api_key = self.api_key
111
- if api_key is None:
112
- return {}
113
122
  return {"Authorization": f"Bearer {api_key}"}
114
123
 
115
124
  @property
@@ -121,17 +130,6 @@ class Relaxai(SyncAPIClient):
121
130
  **self._custom_headers,
122
131
  }
123
132
 
124
- @override
125
- def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
126
- if self.api_key and headers.get("Authorization"):
127
- return
128
- if isinstance(custom_headers.get("Authorization"), Omit):
129
- return
130
-
131
- raise TypeError(
132
- '"Could not resolve authentication method. Expected the api_key to be set. Or for the `Authorization` headers to be explicitly omitted"'
133
- )
134
-
135
133
  def copy(
136
134
  self,
137
135
  *,
@@ -183,6 +181,25 @@ class Relaxai(SyncAPIClient):
183
181
  # client.with_options(timeout=10).foo.create(...)
184
182
  with_options = copy
185
183
 
184
+ def health(
185
+ self,
186
+ *,
187
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
188
+ # The extra values given here take precedence over values defined on the client or passed to this method.
189
+ extra_headers: Headers | None = None,
190
+ extra_query: Query | None = None,
191
+ extra_body: Body | None = None,
192
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
193
+ ) -> str:
194
+ """Check the health of the service."""
195
+ return self.get(
196
+ "/v1/health",
197
+ options=make_request_options(
198
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
199
+ ),
200
+ cast_to=str,
201
+ )
202
+
186
203
  @override
187
204
  def _make_status_error(
188
205
  self,
@@ -220,13 +237,12 @@ class Relaxai(SyncAPIClient):
220
237
  class AsyncRelaxai(AsyncAPIClient):
221
238
  chat: chat.AsyncChatResource
222
239
  embeddings: embeddings.AsyncEmbeddingsResource
223
- health: health.AsyncHealthResource
224
240
  models: models.AsyncModelsResource
225
241
  with_raw_response: AsyncRelaxaiWithRawResponse
226
242
  with_streaming_response: AsyncRelaxaiWithStreamedResponse
227
243
 
228
244
  # client options
229
- api_key: str | None
245
+ api_key: str
230
246
 
231
247
  def __init__(
232
248
  self,
@@ -257,12 +273,16 @@ class AsyncRelaxai(AsyncAPIClient):
257
273
  """
258
274
  if api_key is None:
259
275
  api_key = os.environ.get("RELAXAI_API_KEY")
276
+ if api_key is None:
277
+ raise RelaxaiError(
278
+ "The api_key client option must be set either by passing api_key to the client or by setting the RELAXAI_API_KEY environment variable"
279
+ )
260
280
  self.api_key = api_key
261
281
 
262
282
  if base_url is None:
263
283
  base_url = os.environ.get("RELAXAI_BASE_URL")
264
284
  if base_url is None:
265
- base_url = f"http://127.0.0.1"
285
+ base_url = f"https://api.relax.ai"
266
286
 
267
287
  super().__init__(
268
288
  version=__version__,
@@ -277,7 +297,6 @@ class AsyncRelaxai(AsyncAPIClient):
277
297
 
278
298
  self.chat = chat.AsyncChatResource(self)
279
299
  self.embeddings = embeddings.AsyncEmbeddingsResource(self)
280
- self.health = health.AsyncHealthResource(self)
281
300
  self.models = models.AsyncModelsResource(self)
282
301
  self.with_raw_response = AsyncRelaxaiWithRawResponse(self)
283
302
  self.with_streaming_response = AsyncRelaxaiWithStreamedResponse(self)
@@ -291,8 +310,6 @@ class AsyncRelaxai(AsyncAPIClient):
291
310
  @override
292
311
  def auth_headers(self) -> dict[str, str]:
293
312
  api_key = self.api_key
294
- if api_key is None:
295
- return {}
296
313
  return {"Authorization": f"Bearer {api_key}"}
297
314
 
298
315
  @property
@@ -304,17 +321,6 @@ class AsyncRelaxai(AsyncAPIClient):
304
321
  **self._custom_headers,
305
322
  }
306
323
 
307
- @override
308
- def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
309
- if self.api_key and headers.get("Authorization"):
310
- return
311
- if isinstance(custom_headers.get("Authorization"), Omit):
312
- return
313
-
314
- raise TypeError(
315
- '"Could not resolve authentication method. Expected the api_key to be set. Or for the `Authorization` headers to be explicitly omitted"'
316
- )
317
-
318
324
  def copy(
319
325
  self,
320
326
  *,
@@ -366,6 +372,25 @@ class AsyncRelaxai(AsyncAPIClient):
366
372
  # client.with_options(timeout=10).foo.create(...)
367
373
  with_options = copy
368
374
 
375
+ async def health(
376
+ self,
377
+ *,
378
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
379
+ # The extra values given here take precedence over values defined on the client or passed to this method.
380
+ extra_headers: Headers | None = None,
381
+ extra_query: Query | None = None,
382
+ extra_body: Body | None = None,
383
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
384
+ ) -> str:
385
+ """Check the health of the service."""
386
+ return await self.get(
387
+ "/v1/health",
388
+ options=make_request_options(
389
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
390
+ ),
391
+ cast_to=str,
392
+ )
393
+
369
394
  @override
370
395
  def _make_status_error(
371
396
  self,
@@ -404,33 +429,45 @@ class RelaxaiWithRawResponse:
404
429
  def __init__(self, client: Relaxai) -> None:
405
430
  self.chat = chat.ChatResourceWithRawResponse(client.chat)
406
431
  self.embeddings = embeddings.EmbeddingsResourceWithRawResponse(client.embeddings)
407
- self.health = health.HealthResourceWithRawResponse(client.health)
408
432
  self.models = models.ModelsResourceWithRawResponse(client.models)
409
433
 
434
+ self.health = to_raw_response_wrapper(
435
+ client.health,
436
+ )
437
+
410
438
 
411
439
  class AsyncRelaxaiWithRawResponse:
412
440
  def __init__(self, client: AsyncRelaxai) -> None:
413
441
  self.chat = chat.AsyncChatResourceWithRawResponse(client.chat)
414
442
  self.embeddings = embeddings.AsyncEmbeddingsResourceWithRawResponse(client.embeddings)
415
- self.health = health.AsyncHealthResourceWithRawResponse(client.health)
416
443
  self.models = models.AsyncModelsResourceWithRawResponse(client.models)
417
444
 
445
+ self.health = async_to_raw_response_wrapper(
446
+ client.health,
447
+ )
448
+
418
449
 
419
450
  class RelaxaiWithStreamedResponse:
420
451
  def __init__(self, client: Relaxai) -> None:
421
452
  self.chat = chat.ChatResourceWithStreamingResponse(client.chat)
422
453
  self.embeddings = embeddings.EmbeddingsResourceWithStreamingResponse(client.embeddings)
423
- self.health = health.HealthResourceWithStreamingResponse(client.health)
424
454
  self.models = models.ModelsResourceWithStreamingResponse(client.models)
425
455
 
456
+ self.health = to_streamed_response_wrapper(
457
+ client.health,
458
+ )
459
+
426
460
 
427
461
  class AsyncRelaxaiWithStreamedResponse:
428
462
  def __init__(self, client: AsyncRelaxai) -> None:
429
463
  self.chat = chat.AsyncChatResourceWithStreamingResponse(client.chat)
430
464
  self.embeddings = embeddings.AsyncEmbeddingsResourceWithStreamingResponse(client.embeddings)
431
- self.health = health.AsyncHealthResourceWithStreamingResponse(client.health)
432
465
  self.models = models.AsyncModelsResourceWithStreamingResponse(client.models)
433
466
 
467
+ self.health = async_to_streamed_response_wrapper(
468
+ client.health,
469
+ )
470
+
434
471
 
435
472
  Client = Relaxai
436
473
 
relaxai/_files.py CHANGED
@@ -69,12 +69,12 @@ def _transform_file(file: FileTypes) -> HttpxFileTypes:
69
69
  return file
70
70
 
71
71
  if is_tuple_t(file):
72
- return (file[0], _read_file_content(file[1]), *file[2:])
72
+ return (file[0], read_file_content(file[1]), *file[2:])
73
73
 
74
74
  raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple")
75
75
 
76
76
 
77
- def _read_file_content(file: FileContent) -> HttpxFileContent:
77
+ def read_file_content(file: FileContent) -> HttpxFileContent:
78
78
  if isinstance(file, os.PathLike):
79
79
  return pathlib.Path(file).read_bytes()
80
80
  return file
@@ -111,12 +111,12 @@ async def _async_transform_file(file: FileTypes) -> HttpxFileTypes:
111
111
  return file
112
112
 
113
113
  if is_tuple_t(file):
114
- return (file[0], await _async_read_file_content(file[1]), *file[2:])
114
+ return (file[0], await async_read_file_content(file[1]), *file[2:])
115
115
 
116
116
  raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple")
117
117
 
118
118
 
119
- async def _async_read_file_content(file: FileContent) -> HttpxFileContent:
119
+ async def async_read_file_content(file: FileContent) -> HttpxFileContent:
120
120
  if isinstance(file, os.PathLike):
121
121
  return await anyio.Path(file).read_bytes()
122
122
 
relaxai/_models.py CHANGED
@@ -304,7 +304,7 @@ class BaseModel(pydantic.BaseModel):
304
304
  exclude_none=exclude_none,
305
305
  )
306
306
 
307
- return cast(dict[str, Any], json_safe(dumped)) if mode == "json" else dumped
307
+ return cast("dict[str, Any]", json_safe(dumped)) if mode == "json" else dumped
308
308
 
309
309
  @override
310
310
  def model_dump_json(
relaxai/_version.py CHANGED
@@ -1,4 +1,4 @@
1
1
  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
2
 
3
3
  __title__ = "relaxai"
4
- __version__ = "0.1.0" # x-release-please-version
4
+ __version__ = "0.2.1" # x-release-please-version
@@ -8,14 +8,6 @@ from .chat import (
8
8
  ChatResourceWithStreamingResponse,
9
9
  AsyncChatResourceWithStreamingResponse,
10
10
  )
11
- from .health import (
12
- HealthResource,
13
- AsyncHealthResource,
14
- HealthResourceWithRawResponse,
15
- AsyncHealthResourceWithRawResponse,
16
- HealthResourceWithStreamingResponse,
17
- AsyncHealthResourceWithStreamingResponse,
18
- )
19
11
  from .models import (
20
12
  ModelsResource,
21
13
  AsyncModelsResource,
@@ -46,12 +38,6 @@ __all__ = [
46
38
  "AsyncEmbeddingsResourceWithRawResponse",
47
39
  "EmbeddingsResourceWithStreamingResponse",
48
40
  "AsyncEmbeddingsResourceWithStreamingResponse",
49
- "HealthResource",
50
- "AsyncHealthResource",
51
- "HealthResourceWithRawResponse",
52
- "AsyncHealthResourceWithRawResponse",
53
- "HealthResourceWithStreamingResponse",
54
- "AsyncHealthResourceWithStreamingResponse",
55
41
  "ModelsResource",
56
42
  "AsyncModelsResource",
57
43
  "ModelsResourceWithRawResponse",
relaxai/resources/chat.py CHANGED
@@ -18,9 +18,10 @@ from .._response import (
18
18
  async_to_streamed_response_wrapper,
19
19
  )
20
20
  from .._base_client import make_request_options
21
+ from ..types.stream_options_param import StreamOptionsParam
22
+ from ..types.chat_completion_response import ChatCompletionResponse
21
23
  from ..types.function_definition_param import FunctionDefinitionParam
22
24
  from ..types.chat_completion_message_param import ChatCompletionMessageParam
23
- from ..types.chat_create_completion_response import ChatCreateCompletionResponse
24
25
 
25
26
  __all__ = ["ChatResource", "AsyncChatResource"]
26
27
 
@@ -50,9 +51,10 @@ class ChatResource(SyncAPIResource):
50
51
  *,
51
52
  messages: Iterable[ChatCompletionMessageParam],
52
53
  model: str,
53
- chat_template_kwargs: object | NotGiven = NOT_GIVEN,
54
+ query_stream: bool | NotGiven = NOT_GIVEN,
55
+ chat_template_kwargs: chat_create_completion_params.ChatTemplateKwargs | NotGiven = NOT_GIVEN,
54
56
  frequency_penalty: float | NotGiven = NOT_GIVEN,
55
- function_call: object | NotGiven = NOT_GIVEN,
57
+ function_call: chat_create_completion_params.FunctionCall | NotGiven = NOT_GIVEN,
56
58
  functions: Iterable[FunctionDefinitionParam] | NotGiven = NOT_GIVEN,
57
59
  logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
58
60
  logprobs: bool | NotGiven = NOT_GIVEN,
@@ -60,7 +62,7 @@ class ChatResource(SyncAPIResource):
60
62
  max_tokens: int | NotGiven = NOT_GIVEN,
61
63
  metadata: Dict[str, str] | NotGiven = NOT_GIVEN,
62
64
  n: int | NotGiven = NOT_GIVEN,
63
- parallel_tool_calls: object | NotGiven = NOT_GIVEN,
65
+ parallel_tool_calls: chat_create_completion_params.ParallelToolCalls | NotGiven = NOT_GIVEN,
64
66
  prediction: chat_create_completion_params.Prediction | NotGiven = NOT_GIVEN,
65
67
  presence_penalty: float | NotGiven = NOT_GIVEN,
66
68
  reasoning_effort: str | NotGiven = NOT_GIVEN,
@@ -68,25 +70,29 @@ class ChatResource(SyncAPIResource):
68
70
  seed: int | NotGiven = NOT_GIVEN,
69
71
  stop: List[str] | NotGiven = NOT_GIVEN,
70
72
  store: bool | NotGiven = NOT_GIVEN,
71
- stream: bool | NotGiven = NOT_GIVEN,
72
- stream_options: chat_create_completion_params.StreamOptions | NotGiven = NOT_GIVEN,
73
+ body_stream: bool | NotGiven = NOT_GIVEN,
74
+ stream_options: StreamOptionsParam | NotGiven = NOT_GIVEN,
73
75
  temperature: float | NotGiven = NOT_GIVEN,
74
- tool_choice: object | NotGiven = NOT_GIVEN,
76
+ tool_choice: chat_create_completion_params.ToolChoice | NotGiven = NOT_GIVEN,
75
77
  tools: Iterable[chat_create_completion_params.Tool] | NotGiven = NOT_GIVEN,
76
78
  top_logprobs: int | NotGiven = NOT_GIVEN,
77
79
  top_p: float | NotGiven = NOT_GIVEN,
78
80
  user: str | NotGiven = NOT_GIVEN,
81
+ web_search_options: chat_create_completion_params.WebSearchOptions | NotGiven = NOT_GIVEN,
79
82
  # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
80
83
  # The extra values given here take precedence over values defined on the client or passed to this method.
81
84
  extra_headers: Headers | None = None,
82
85
  extra_query: Query | None = None,
83
86
  extra_body: Body | None = None,
84
87
  timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
85
- ) -> ChatCreateCompletionResponse:
88
+ ) -> ChatCompletionResponse:
86
89
  """
87
90
  Creates a chat completion for the given model
88
91
 
89
92
  Args:
93
+ query_stream: If true, server responds as an SSE stream. Generators may produce an ergonomic
94
+ streaming method when this is set.
95
+
90
96
  extra_headers: Send extra headers
91
97
 
92
98
  extra_query: Add additional query parameters to the request
@@ -119,7 +125,7 @@ class ChatResource(SyncAPIResource):
119
125
  "seed": seed,
120
126
  "stop": stop,
121
127
  "store": store,
122
- "stream": stream,
128
+ "body_stream": body_stream,
123
129
  "stream_options": stream_options,
124
130
  "temperature": temperature,
125
131
  "tool_choice": tool_choice,
@@ -127,13 +133,20 @@ class ChatResource(SyncAPIResource):
127
133
  "top_logprobs": top_logprobs,
128
134
  "top_p": top_p,
129
135
  "user": user,
136
+ "web_search_options": web_search_options,
130
137
  },
131
138
  chat_create_completion_params.ChatCreateCompletionParams,
132
139
  ),
133
140
  options=make_request_options(
134
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
141
+ extra_headers=extra_headers,
142
+ extra_query=extra_query,
143
+ extra_body=extra_body,
144
+ timeout=timeout,
145
+ query=maybe_transform(
146
+ {"query_stream": query_stream}, chat_create_completion_params.ChatCreateCompletionParams
147
+ ),
135
148
  ),
136
- cast_to=ChatCreateCompletionResponse,
149
+ cast_to=ChatCompletionResponse,
137
150
  )
138
151
 
139
152
 
@@ -162,9 +175,10 @@ class AsyncChatResource(AsyncAPIResource):
162
175
  *,
163
176
  messages: Iterable[ChatCompletionMessageParam],
164
177
  model: str,
165
- chat_template_kwargs: object | NotGiven = NOT_GIVEN,
178
+ query_stream: bool | NotGiven = NOT_GIVEN,
179
+ chat_template_kwargs: chat_create_completion_params.ChatTemplateKwargs | NotGiven = NOT_GIVEN,
166
180
  frequency_penalty: float | NotGiven = NOT_GIVEN,
167
- function_call: object | NotGiven = NOT_GIVEN,
181
+ function_call: chat_create_completion_params.FunctionCall | NotGiven = NOT_GIVEN,
168
182
  functions: Iterable[FunctionDefinitionParam] | NotGiven = NOT_GIVEN,
169
183
  logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
170
184
  logprobs: bool | NotGiven = NOT_GIVEN,
@@ -172,7 +186,7 @@ class AsyncChatResource(AsyncAPIResource):
172
186
  max_tokens: int | NotGiven = NOT_GIVEN,
173
187
  metadata: Dict[str, str] | NotGiven = NOT_GIVEN,
174
188
  n: int | NotGiven = NOT_GIVEN,
175
- parallel_tool_calls: object | NotGiven = NOT_GIVEN,
189
+ parallel_tool_calls: chat_create_completion_params.ParallelToolCalls | NotGiven = NOT_GIVEN,
176
190
  prediction: chat_create_completion_params.Prediction | NotGiven = NOT_GIVEN,
177
191
  presence_penalty: float | NotGiven = NOT_GIVEN,
178
192
  reasoning_effort: str | NotGiven = NOT_GIVEN,
@@ -180,25 +194,29 @@ class AsyncChatResource(AsyncAPIResource):
180
194
  seed: int | NotGiven = NOT_GIVEN,
181
195
  stop: List[str] | NotGiven = NOT_GIVEN,
182
196
  store: bool | NotGiven = NOT_GIVEN,
183
- stream: bool | NotGiven = NOT_GIVEN,
184
- stream_options: chat_create_completion_params.StreamOptions | NotGiven = NOT_GIVEN,
197
+ body_stream: bool | NotGiven = NOT_GIVEN,
198
+ stream_options: StreamOptionsParam | NotGiven = NOT_GIVEN,
185
199
  temperature: float | NotGiven = NOT_GIVEN,
186
- tool_choice: object | NotGiven = NOT_GIVEN,
200
+ tool_choice: chat_create_completion_params.ToolChoice | NotGiven = NOT_GIVEN,
187
201
  tools: Iterable[chat_create_completion_params.Tool] | NotGiven = NOT_GIVEN,
188
202
  top_logprobs: int | NotGiven = NOT_GIVEN,
189
203
  top_p: float | NotGiven = NOT_GIVEN,
190
204
  user: str | NotGiven = NOT_GIVEN,
205
+ web_search_options: chat_create_completion_params.WebSearchOptions | NotGiven = NOT_GIVEN,
191
206
  # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
192
207
  # The extra values given here take precedence over values defined on the client or passed to this method.
193
208
  extra_headers: Headers | None = None,
194
209
  extra_query: Query | None = None,
195
210
  extra_body: Body | None = None,
196
211
  timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
197
- ) -> ChatCreateCompletionResponse:
212
+ ) -> ChatCompletionResponse:
198
213
  """
199
214
  Creates a chat completion for the given model
200
215
 
201
216
  Args:
217
+ query_stream: If true, server responds as an SSE stream. Generators may produce an ergonomic
218
+ streaming method when this is set.
219
+
202
220
  extra_headers: Send extra headers
203
221
 
204
222
  extra_query: Add additional query parameters to the request
@@ -231,7 +249,7 @@ class AsyncChatResource(AsyncAPIResource):
231
249
  "seed": seed,
232
250
  "stop": stop,
233
251
  "store": store,
234
- "stream": stream,
252
+ "body_stream": body_stream,
235
253
  "stream_options": stream_options,
236
254
  "temperature": temperature,
237
255
  "tool_choice": tool_choice,
@@ -239,13 +257,20 @@ class AsyncChatResource(AsyncAPIResource):
239
257
  "top_logprobs": top_logprobs,
240
258
  "top_p": top_p,
241
259
  "user": user,
260
+ "web_search_options": web_search_options,
242
261
  },
243
262
  chat_create_completion_params.ChatCreateCompletionParams,
244
263
  ),
245
264
  options=make_request_options(
246
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
265
+ extra_headers=extra_headers,
266
+ extra_query=extra_query,
267
+ extra_body=extra_body,
268
+ timeout=timeout,
269
+ query=await async_maybe_transform(
270
+ {"query_stream": query_stream}, chat_create_completion_params.ChatCreateCompletionParams
271
+ ),
247
272
  ),
248
- cast_to=ChatCreateCompletionResponse,
273
+ cast_to=ChatCompletionResponse,
249
274
  )
250
275
 
251
276
 
@@ -4,7 +4,7 @@ from __future__ import annotations
4
4
 
5
5
  import httpx
6
6
 
7
- from ..types import embedding_create_params
7
+ from ..types import embedding_create_embedding_params
8
8
  from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
9
9
  from .._utils import maybe_transform, async_maybe_transform
10
10
  from .._compat import cached_property
@@ -16,7 +16,7 @@ from .._response import (
16
16
  async_to_streamed_response_wrapper,
17
17
  )
18
18
  from .._base_client import make_request_options
19
- from ..types.embedding_create_response import EmbeddingCreateResponse
19
+ from ..types.embedding_response import EmbeddingResponse
20
20
 
21
21
  __all__ = ["EmbeddingsResource", "AsyncEmbeddingsResource"]
22
22
 
@@ -41,7 +41,7 @@ class EmbeddingsResource(SyncAPIResource):
41
41
  """
42
42
  return EmbeddingsResourceWithStreamingResponse(self)
43
43
 
44
- def create(
44
+ def create_embedding(
45
45
  self,
46
46
  *,
47
47
  input: object,
@@ -55,7 +55,7 @@ class EmbeddingsResource(SyncAPIResource):
55
55
  extra_query: Query | None = None,
56
56
  extra_body: Body | None = None,
57
57
  timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
58
- ) -> EmbeddingCreateResponse:
58
+ ) -> EmbeddingResponse:
59
59
  """
60
60
  Creates an embedding vector representing the input text.
61
61
 
@@ -78,12 +78,12 @@ class EmbeddingsResource(SyncAPIResource):
78
78
  "encoding_format": encoding_format,
79
79
  "user": user,
80
80
  },
81
- embedding_create_params.EmbeddingCreateParams,
81
+ embedding_create_embedding_params.EmbeddingCreateEmbeddingParams,
82
82
  ),
83
83
  options=make_request_options(
84
84
  extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
85
85
  ),
86
- cast_to=EmbeddingCreateResponse,
86
+ cast_to=EmbeddingResponse,
87
87
  )
88
88
 
89
89
 
@@ -107,7 +107,7 @@ class AsyncEmbeddingsResource(AsyncAPIResource):
107
107
  """
108
108
  return AsyncEmbeddingsResourceWithStreamingResponse(self)
109
109
 
110
- async def create(
110
+ async def create_embedding(
111
111
  self,
112
112
  *,
113
113
  input: object,
@@ -121,7 +121,7 @@ class AsyncEmbeddingsResource(AsyncAPIResource):
121
121
  extra_query: Query | None = None,
122
122
  extra_body: Body | None = None,
123
123
  timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
124
- ) -> EmbeddingCreateResponse:
124
+ ) -> EmbeddingResponse:
125
125
  """
126
126
  Creates an embedding vector representing the input text.
127
127
 
@@ -144,12 +144,12 @@ class AsyncEmbeddingsResource(AsyncAPIResource):
144
144
  "encoding_format": encoding_format,
145
145
  "user": user,
146
146
  },
147
- embedding_create_params.EmbeddingCreateParams,
147
+ embedding_create_embedding_params.EmbeddingCreateEmbeddingParams,
148
148
  ),
149
149
  options=make_request_options(
150
150
  extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
151
151
  ),
152
- cast_to=EmbeddingCreateResponse,
152
+ cast_to=EmbeddingResponse,
153
153
  )
154
154
 
155
155
 
@@ -157,8 +157,8 @@ class EmbeddingsResourceWithRawResponse:
157
157
  def __init__(self, embeddings: EmbeddingsResource) -> None:
158
158
  self._embeddings = embeddings
159
159
 
160
- self.create = to_raw_response_wrapper(
161
- embeddings.create,
160
+ self.create_embedding = to_raw_response_wrapper(
161
+ embeddings.create_embedding,
162
162
  )
163
163
 
164
164
 
@@ -166,8 +166,8 @@ class AsyncEmbeddingsResourceWithRawResponse:
166
166
  def __init__(self, embeddings: AsyncEmbeddingsResource) -> None:
167
167
  self._embeddings = embeddings
168
168
 
169
- self.create = async_to_raw_response_wrapper(
170
- embeddings.create,
169
+ self.create_embedding = async_to_raw_response_wrapper(
170
+ embeddings.create_embedding,
171
171
  )
172
172
 
173
173
 
@@ -175,8 +175,8 @@ class EmbeddingsResourceWithStreamingResponse:
175
175
  def __init__(self, embeddings: EmbeddingsResource) -> None:
176
176
  self._embeddings = embeddings
177
177
 
178
- self.create = to_streamed_response_wrapper(
179
- embeddings.create,
178
+ self.create_embedding = to_streamed_response_wrapper(
179
+ embeddings.create_embedding,
180
180
  )
181
181
 
182
182
 
@@ -184,6 +184,6 @@ class AsyncEmbeddingsResourceWithStreamingResponse:
184
184
  def __init__(self, embeddings: AsyncEmbeddingsResource) -> None:
185
185
  self._embeddings = embeddings
186
186
 
187
- self.create = async_to_streamed_response_wrapper(
188
- embeddings.create,
187
+ self.create_embedding = async_to_streamed_response_wrapper(
188
+ embeddings.create_embedding,
189
189
  )