relaxai 0.0.1__py3-none-any.whl → 0.17.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of relaxai might be problematic.

Files changed (30)
  1. relaxai/_base_client.py +12 -2
  2. relaxai/_client.py +77 -40
  3. relaxai/_files.py +4 -4
  4. relaxai/_models.py +31 -7
  5. relaxai/_version.py +1 -1
  6. relaxai/resources/__init__.py +0 -14
  7. relaxai/resources/chat.py +38 -13
  8. relaxai/resources/embeddings.py +18 -18
  9. relaxai/resources/models.py +57 -57
  10. relaxai/types/__init__.py +11 -6
  11. relaxai/types/chat_completion_message.py +29 -3
  12. relaxai/types/chat_completion_message_param.py +29 -4
  13. relaxai/types/{chat_create_completion_response.py → chat_completion_response.py} +4 -4
  14. relaxai/types/chat_create_completion_params.py +40 -8
  15. relaxai/types/{embedding_create_params.py → embedding_create_embedding_params.py} +2 -2
  16. relaxai/types/{embedding_create_response.py → embedding_response.py} +4 -4
  17. relaxai/types/{health_check_response.py → health_response.py} +2 -2
  18. relaxai/types/{model_list_response.py → model_list.py} +2 -2
  19. relaxai/types/shared/__init__.py +5 -0
  20. relaxai/types/shared/openai_completion_tokens_details.py +15 -0
  21. relaxai/types/shared/openai_prompt_tokens_details.py +11 -0
  22. relaxai/types/shared/openai_usage.py +19 -0
  23. relaxai/types/stream_options_param.py +11 -0
  24. {relaxai-0.0.1.dist-info → relaxai-0.17.0.dist-info}/METADATA +48 -39
  25. relaxai-0.17.0.dist-info/RECORD +53 -0
  26. relaxai/resources/health.py +0 -134
  27. relaxai/types/usage.py +0 -33
  28. relaxai-0.0.1.dist-info/RECORD +0 -50
  29. {relaxai-0.0.1.dist-info → relaxai-0.17.0.dist-info}/WHEEL +0 -0
  30. {relaxai-0.0.1.dist-info → relaxai-0.17.0.dist-info}/licenses/LICENSE +0 -0
relaxai/resources/embeddings.py CHANGED
@@ -4,7 +4,7 @@ from __future__ import annotations
 
 import httpx
 
-from ..types import embedding_create_params
+from ..types import embedding_create_embedding_params
 from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
 from .._utils import maybe_transform, async_maybe_transform
 from .._compat import cached_property
@@ -16,7 +16,7 @@ from .._response import (
     async_to_streamed_response_wrapper,
 )
 from .._base_client import make_request_options
-from ..types.embedding_create_response import EmbeddingCreateResponse
+from ..types.embedding_response import EmbeddingResponse
 
 __all__ = ["EmbeddingsResource", "AsyncEmbeddingsResource"]
 
@@ -41,7 +41,7 @@ class EmbeddingsResource(SyncAPIResource):
         """
         return EmbeddingsResourceWithStreamingResponse(self)
 
-    def create(
+    def create_embedding(
         self,
         *,
         input: object,
@@ -55,7 +55,7 @@ class EmbeddingsResource(SyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> EmbeddingCreateResponse:
+    ) -> EmbeddingResponse:
         """
         Creates an embedding vector representing the input text.
 
@@ -78,12 +78,12 @@ class EmbeddingsResource(SyncAPIResource):
                     "encoding_format": encoding_format,
                     "user": user,
                 },
-                embedding_create_params.EmbeddingCreateParams,
+                embedding_create_embedding_params.EmbeddingCreateEmbeddingParams,
             ),
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=EmbeddingCreateResponse,
+            cast_to=EmbeddingResponse,
         )
 
 
@@ -107,7 +107,7 @@ class AsyncEmbeddingsResource(AsyncAPIResource):
         """
         return AsyncEmbeddingsResourceWithStreamingResponse(self)
 
-    async def create(
+    async def create_embedding(
         self,
         *,
         input: object,
@@ -121,7 +121,7 @@ class AsyncEmbeddingsResource(AsyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> EmbeddingCreateResponse:
+    ) -> EmbeddingResponse:
         """
         Creates an embedding vector representing the input text.
 
@@ -144,12 +144,12 @@ class AsyncEmbeddingsResource(AsyncAPIResource):
                     "encoding_format": encoding_format,
                     "user": user,
                 },
-                embedding_create_params.EmbeddingCreateParams,
+                embedding_create_embedding_params.EmbeddingCreateEmbeddingParams,
             ),
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=EmbeddingCreateResponse,
+            cast_to=EmbeddingResponse,
         )
 
 
@@ -157,8 +157,8 @@ class EmbeddingsResourceWithRawResponse:
     def __init__(self, embeddings: EmbeddingsResource) -> None:
         self._embeddings = embeddings
 
-        self.create = to_raw_response_wrapper(
-            embeddings.create,
+        self.create_embedding = to_raw_response_wrapper(
+            embeddings.create_embedding,
         )
 
 
@@ -166,8 +166,8 @@ class AsyncEmbeddingsResourceWithRawResponse:
    def __init__(self, embeddings: AsyncEmbeddingsResource) -> None:
         self._embeddings = embeddings
 
-        self.create = async_to_raw_response_wrapper(
-            embeddings.create,
+        self.create_embedding = async_to_raw_response_wrapper(
+            embeddings.create_embedding,
         )
 
 
@@ -175,8 +175,8 @@ class EmbeddingsResourceWithStreamingResponse:
     def __init__(self, embeddings: EmbeddingsResource) -> None:
         self._embeddings = embeddings
 
-        self.create = to_streamed_response_wrapper(
-            embeddings.create,
+        self.create_embedding = to_streamed_response_wrapper(
+            embeddings.create_embedding,
         )
 
 
@@ -184,6 +184,6 @@ class AsyncEmbeddingsResourceWithStreamingResponse:
     def __init__(self, embeddings: AsyncEmbeddingsResource) -> None:
         self._embeddings = embeddings
 
-        self.create = async_to_streamed_response_wrapper(
-            embeddings.create,
+        self.create_embedding = async_to_streamed_response_wrapper(
+            embeddings.create_embedding,
        )
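
For callers, the visible change in this file is the rename of `create` to `create_embedding`; the request and response type renames follow from it. A minimal migration sketch, assuming the top-level client class is named `Relaxai` (client construction is not shown in this diff):

```python
from relaxai import Relaxai  # client class name assumed, not shown in this diff

client = Relaxai()  # auth/config omitted

# 0.0.1:
#   embedding = client.embeddings.create(input="hello world", model="some-model")
# 0.17.0 — same keyword arguments, renamed method and response type:
embedding = client.embeddings.create_embedding(
    input="hello world",  # typed as `object` in the params TypedDict
    model="some-model",   # placeholder model id
)
print(type(embedding).__name__)  # EmbeddingResponse (was EmbeddingCreateResponse)
```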
relaxai/resources/models.py CHANGED
@@ -15,7 +15,7 @@ from .._response import (
 )
 from ..types.model import Model
 from .._base_client import make_request_options
-from ..types.model_list_response import ModelListResponse
+from ..types.model_list import ModelList
 
 __all__ = ["ModelsResource", "AsyncModelsResource"]
 
@@ -40,7 +40,26 @@ class ModelsResource(SyncAPIResource):
         """
         return ModelsResourceWithStreamingResponse(self)
 
-    def retrieve(
+    def list_models(
+        self,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ModelList:
+        """List all the available models"""
+        return self._get(
+            "/v1/models",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ModelList,
+        )
+
+    def retrieve_model(
         self,
         model: str,
         *,
@@ -73,25 +92,6 @@ class ModelsResource(SyncAPIResource):
             cast_to=Model,
         )
 
-    def list(
-        self,
-        *,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> ModelListResponse:
-        """List all the available models"""
-        return self._get(
-            "/v1/models",
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=ModelListResponse,
-        )
-
 
 class AsyncModelsResource(AsyncAPIResource):
     @cached_property
@@ -113,7 +113,26 @@ class AsyncModelsResource(AsyncAPIResource):
         """
         return AsyncModelsResourceWithStreamingResponse(self)
 
-    async def retrieve(
+    async def list_models(
+        self,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ModelList:
+        """List all the available models"""
+        return await self._get(
+            "/v1/models",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ModelList,
+        )
+
+    async def retrieve_model(
         self,
         model: str,
         *,
@@ -146,35 +165,16 @@ class AsyncModelsResource(AsyncAPIResource):
             cast_to=Model,
         )
 
-    async def list(
-        self,
-        *,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> ModelListResponse:
-        """List all the available models"""
-        return await self._get(
-            "/v1/models",
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=ModelListResponse,
-        )
-
 
 class ModelsResourceWithRawResponse:
     def __init__(self, models: ModelsResource) -> None:
         self._models = models
 
-        self.retrieve = to_raw_response_wrapper(
-            models.retrieve,
+        self.list_models = to_raw_response_wrapper(
+            models.list_models,
         )
-        self.list = to_raw_response_wrapper(
-            models.list,
+        self.retrieve_model = to_raw_response_wrapper(
+            models.retrieve_model,
         )
 
 
@@ -182,11 +182,11 @@ class AsyncModelsResourceWithRawResponse:
     def __init__(self, models: AsyncModelsResource) -> None:
         self._models = models
 
-        self.retrieve = async_to_raw_response_wrapper(
-            models.retrieve,
+        self.list_models = async_to_raw_response_wrapper(
+            models.list_models,
         )
-        self.list = async_to_raw_response_wrapper(
-            models.list,
+        self.retrieve_model = async_to_raw_response_wrapper(
+            models.retrieve_model,
         )
 
 
@@ -194,11 +194,11 @@ class ModelsResourceWithStreamingResponse:
     def __init__(self, models: ModelsResource) -> None:
         self._models = models
 
-        self.retrieve = to_streamed_response_wrapper(
-            models.retrieve,
+        self.list_models = to_streamed_response_wrapper(
+            models.list_models,
         )
-        self.list = to_streamed_response_wrapper(
-            models.list,
+        self.retrieve_model = to_streamed_response_wrapper(
+            models.retrieve_model,
         )
 
 
@@ -206,9 +206,9 @@ class AsyncModelsResourceWithStreamingResponse:
     def __init__(self, models: AsyncModelsResource) -> None:
         self._models = models
 
-        self.retrieve = async_to_streamed_response_wrapper(
-            models.retrieve,
+        self.list_models = async_to_streamed_response_wrapper(
+            models.list_models,
        )
-        self.list = async_to_streamed_response_wrapper(
-            models.list,
+        self.retrieve_model = async_to_streamed_response_wrapper(
+            models.retrieve_model,
        )
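
Here the method pair is renamed (`list` → `list_models`, `retrieve` → `retrieve_model`) along with the list response type. A before/after sketch under the same assumed `Relaxai` client:

```python
# 0.0.1:
#   models = client.models.list()            # -> ModelListResponse
#   model = client.models.retrieve("m-123")  # -> Model
# 0.17.0:
models = client.models.list_models()           # -> ModelList
model = client.models.retrieve_model("m-123")  # "m-123" is a placeholder id
for m in models.data:  # ModelList exposes `data: List[Model]` (see the types diff below)
    print(m)
```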
relaxai/types/__init__.py CHANGED
@@ -3,16 +3,21 @@
 from __future__ import annotations
 
 from .model import Model as Model
-from .usage import Usage as Usage
+from .shared import (
+    OpenAIUsage as OpenAIUsage,
+    OpenAIPromptTokensDetails as OpenAIPromptTokensDetails,
+    OpenAICompletionTokensDetails as OpenAICompletionTokensDetails,
+)
+from .model_list import ModelList as ModelList
 from .function_call import FunctionCall as FunctionCall
+from .health_response import HealthResponse as HealthResponse
+from .embedding_response import EmbeddingResponse as EmbeddingResponse
 from .function_call_param import FunctionCallParam as FunctionCallParam
-from .model_list_response import ModelListResponse as ModelListResponse
-from .health_check_response import HealthCheckResponse as HealthCheckResponse
+from .stream_options_param import StreamOptionsParam as StreamOptionsParam
 from .content_filter_results import ContentFilterResults as ContentFilterResults
 from .chat_completion_message import ChatCompletionMessage as ChatCompletionMessage
-from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
-from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse
+from .chat_completion_response import ChatCompletionResponse as ChatCompletionResponse
 from .function_definition_param import FunctionDefinitionParam as FunctionDefinitionParam
 from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam
 from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams
-from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse
+from .embedding_create_embedding_params import EmbeddingCreateEmbeddingParams as EmbeddingCreateEmbeddingParams
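
Taken together, the re-exports amount to a rename table for `relaxai.types`; the following import block, derived directly from the hunk above, is what 0.0.1 code migrates to:

```python
from relaxai.types import (
    OpenAIUsage,                     # replaces Usage
    ModelList,                       # replaces ModelListResponse
    HealthResponse,                  # replaces HealthCheckResponse
    EmbeddingResponse,               # replaces EmbeddingCreateResponse
    ChatCompletionResponse,          # replaces ChatCreateCompletionResponse
    EmbeddingCreateEmbeddingParams,  # replaces EmbeddingCreateParams
    StreamOptionsParam,              # new in 0.17.0
)
```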
relaxai/types/chat_completion_message.py CHANGED
@@ -1,13 +1,37 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 from typing import List, Optional
+from typing_extensions import Literal
 
 from pydantic import Field as FieldInfo
 
 from .._models import BaseModel
 from .function_call import FunctionCall
 
-__all__ = ["ChatCompletionMessage", "MultiContent", "MultiContentImageURL", "ToolCall"]
+__all__ = [
+    "ChatCompletionMessage",
+    "Annotation",
+    "AnnotationURLCitation",
+    "MultiContent",
+    "MultiContentImageURL",
+    "ToolCall",
+]
+
+
+class AnnotationURLCitation(BaseModel):
+    end_index: int
+
+    start_index: int
+
+    title: str
+
+    url: str
+
+
+class Annotation(BaseModel):
+    type: Optional[Literal["url_citation"]] = None
+
+    url_citation: Optional[AnnotationURLCitation] = None
 
 
 class MultiContentImageURL(BaseModel):
@@ -35,14 +59,16 @@ class ToolCall(BaseModel):
 
 
 class ChatCompletionMessage(BaseModel):
-    multi_content: List[MultiContent] = FieldInfo(alias="MultiContent")
+    content: str
 
     role: str
 
-    content: Optional[str] = None
+    annotations: Optional[List[Annotation]] = None
 
     function_call: Optional[FunctionCall] = None
 
+    multi_content: Optional[List[MultiContent]] = FieldInfo(alias="MultiContent", default=None)
+
     name: Optional[str] = None
 
     reasoning_content: Optional[str] = None
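
`ChatCompletionMessage` flips its required fields: `content` is now required while `multi_content` becomes optional, and URL-citation annotations are new. A construction sketch using only fields shown in this hunk (standard pydantic keyword construction assumed):

```python
from relaxai.types import ChatCompletionMessage
from relaxai.types.chat_completion_message import Annotation, AnnotationURLCitation

msg = ChatCompletionMessage(
    content="See the linked article.",  # required in 0.17.0 (was Optional)
    role="assistant",
    annotations=[
        Annotation(
            type="url_citation",
            url_citation=AnnotationURLCitation(
                start_index=8,
                end_index=26,
                title="Example article",
                url="https://example.com/article",
            ),
        )
    ],
)
print(msg.annotations[0].url_citation.url)
```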
relaxai/types/chat_completion_message_param.py CHANGED
@@ -3,12 +3,35 @@
 from __future__ import annotations
 
 from typing import Iterable
-from typing_extensions import Required, Annotated, TypedDict
+from typing_extensions import Literal, Required, Annotated, TypedDict
 
 from .._utils import PropertyInfo
 from .function_call_param import FunctionCallParam
 
-__all__ = ["ChatCompletionMessageParam", "MultiContent", "MultiContentImageURL", "ToolCall"]
+__all__ = [
+    "ChatCompletionMessageParam",
+    "Annotation",
+    "AnnotationURLCitation",
+    "MultiContent",
+    "MultiContentImageURL",
+    "ToolCall",
+]
+
+
+class AnnotationURLCitation(TypedDict, total=False):
+    end_index: Required[int]
+
+    start_index: Required[int]
+
+    title: Required[str]
+
+    url: Required[str]
+
+
+class Annotation(TypedDict, total=False):
+    type: Literal["url_citation"]
+
+    url_citation: AnnotationURLCitation
 
 
 class MultiContentImageURL(TypedDict, total=False):
@@ -36,14 +59,16 @@ class ToolCall(TypedDict, total=False):
 
 
 class ChatCompletionMessageParam(TypedDict, total=False):
-    multi_content: Required[Annotated[Iterable[MultiContent], PropertyInfo(alias="MultiContent")]]
+    content: Required[str]
 
     role: Required[str]
 
-    content: str
+    annotations: Iterable[Annotation]
 
     function_call: FunctionCallParam
 
+    multi_content: Annotated[Iterable[MultiContent], PropertyInfo(alias="MultiContent")]
+
     name: str
 
     reasoning_content: str
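
The request-side `TypedDict` mirrors the same flip, so a message dict now needs only `content` and `role`:

```python
from relaxai.types import ChatCompletionMessageParam

message: ChatCompletionMessageParam = {
    "content": "What changed in 0.17.0?",  # Required
    "role": "user",                        # Required
    # "multi_content": [...],  # now optional; serialized under the "MultiContent" alias
}
```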
relaxai/types/{chat_create_completion_response.py → chat_completion_response.py} RENAMED
@@ -4,13 +4,13 @@ from typing import Dict, List, Optional
 
 from pydantic import Field as FieldInfo
 
-from .usage import Usage
 from .._models import BaseModel
+from .shared.openai_usage import OpenAIUsage
 from .content_filter_results import ContentFilterResults
 from .chat_completion_message import ChatCompletionMessage
 
 __all__ = [
-    "ChatCreateCompletionResponse",
+    "ChatCompletionResponse",
     "Choice",
     "ChoiceLogprobs",
     "ChoiceLogprobsContent",
@@ -59,7 +59,7 @@ class PromptFilterResult(BaseModel):
     content_filter_results: Optional[ContentFilterResults] = None
 
 
-class ChatCreateCompletionResponse(BaseModel):
+class ChatCompletionResponse(BaseModel):
     id: str
 
     choices: List[Choice]
@@ -74,6 +74,6 @@ class ChatCreateCompletionResponse(BaseModel):
 
     system_fingerprint: str
 
-    usage: Usage
+    usage: OpenAIUsage
 
     prompt_filter_results: Optional[List[PromptFilterResult]] = None
relaxai/types/chat_create_completion_params.py CHANGED
@@ -3,8 +3,10 @@
 from __future__ import annotations
 
 from typing import Dict, List, Iterable
-from typing_extensions import Required, TypedDict
+from typing_extensions import Required, Annotated, TypedDict
 
+from .._utils import PropertyInfo
+from .stream_options_param import StreamOptionsParam
 from .function_definition_param import FunctionDefinitionParam
 from .chat_completion_message_param import ChatCompletionMessageParam
 
@@ -13,8 +15,10 @@ __all__ = [
     "Prediction",
     "ResponseFormat",
     "ResponseFormatJsonSchema",
-    "StreamOptions",
     "Tool",
+    "WebSearchOptions",
+    "WebSearchOptionsUserLocation",
+    "WebSearchOptionsUserLocationApproximate",
 ]
 
 
@@ -23,6 +27,12 @@ class ChatCreateCompletionParams(TypedDict, total=False):
 
     model: Required[str]
 
+    query_stream: Annotated[bool, PropertyInfo(alias="stream")]
+    """If true, server responds as an SSE stream.
+
+    Generators may produce an ergonomic streaming method when this is set.
+    """
+
     chat_template_kwargs: object
 
     frequency_penalty: float
@@ -59,9 +69,9 @@ class ChatCreateCompletionParams(TypedDict, total=False):
 
     store: bool
 
-    stream: bool
+    body_stream: Annotated[bool, PropertyInfo(alias="stream")]
 
-    stream_options: StreamOptions
+    stream_options: StreamOptionsParam
 
     temperature: float
 
@@ -75,6 +85,8 @@ class ChatCreateCompletionParams(TypedDict, total=False):
 
     user: str
 
+    web_search_options: WebSearchOptions
+
 
 class Prediction(TypedDict, total=False):
     content: Required[str]
@@ -96,11 +108,31 @@ class ResponseFormat(TypedDict, total=False):
     type: str
 
 
-class StreamOptions(TypedDict, total=False):
-    include_usage: bool
-
-
 class Tool(TypedDict, total=False):
     type: Required[str]
 
     function: FunctionDefinitionParam
+
+
+class WebSearchOptionsUserLocationApproximate(TypedDict, total=False):
+    city: str
+
+    country: str
+
+    latitude: float
+
+    longitude: float
+
+    state: str
+
+
+class WebSearchOptionsUserLocation(TypedDict, total=False):
+    approximate: WebSearchOptionsUserLocationApproximate
+
+    type: str
+
+
+class WebSearchOptions(TypedDict, total=False):
+    search_context_size: int
+
+    user_location: WebSearchOptionsUserLocation
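
Two details worth flagging: the wire-level `stream` flag is now reachable through two aliased keys (`query_stream` and `body_stream`), and web search options are new. A hedged request sketch using only fields declared in this file; the model id and context size are placeholders, and `include_usage` assumes `StreamOptionsParam` kept the field from the removed `StreamOptions` (its new file is not shown in this diff):

```python
from relaxai.types import ChatCreateCompletionParams

params: ChatCreateCompletionParams = {
    "model": "some-model",  # placeholder id
    "messages": [{"content": "Find recent coverage of this topic.", "role": "user"}],
    "body_stream": True,                        # serialized as "stream"
    "stream_options": {"include_usage": True},  # field assumed, see note above
    "web_search_options": {
        "search_context_size": 3,  # placeholder value
        "user_location": {
            "type": "approximate",
            "approximate": {"city": "Berlin", "country": "DE"},
        },
    },
}
```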
relaxai/types/{embedding_create_params.py → embedding_create_embedding_params.py} RENAMED
@@ -4,10 +4,10 @@ from __future__ import annotations
 
 from typing_extensions import Required, TypedDict
 
-__all__ = ["EmbeddingCreateParams"]
+__all__ = ["EmbeddingCreateEmbeddingParams"]
 
 
-class EmbeddingCreateParams(TypedDict, total=False):
+class EmbeddingCreateEmbeddingParams(TypedDict, total=False):
     input: Required[object]
 
     model: Required[str]
relaxai/types/{embedding_create_response.py → embedding_response.py} RENAMED
@@ -4,10 +4,10 @@ from typing import Dict, List
 
 from pydantic import Field as FieldInfo
 
-from .usage import Usage
 from .._models import BaseModel
+from .shared.openai_usage import OpenAIUsage
 
-__all__ = ["EmbeddingCreateResponse", "Data"]
+__all__ = ["EmbeddingResponse", "Data"]
 
 
 class Data(BaseModel):
@@ -18,7 +18,7 @@ class Data(BaseModel):
     object: str
 
 
-class EmbeddingCreateResponse(BaseModel):
+class EmbeddingResponse(BaseModel):
     data: List[Data]
 
     http_header: Dict[str, List[str]] = FieldInfo(alias="httpHeader")
@@ -27,4 +27,4 @@ class EmbeddingCreateResponse(BaseModel):
 
     object: str
 
-    usage: Usage
+    usage: OpenAIUsage
relaxai/types/{health_check_response.py → health_response.py} RENAMED
@@ -2,6 +2,6 @@
 
 from typing_extensions import TypeAlias
 
-__all__ = ["HealthCheckResponse"]
+__all__ = ["HealthResponse"]
 
-HealthCheckResponse: TypeAlias = str
+HealthResponse: TypeAlias = str
relaxai/types/{model_list_response.py → model_list.py} RENAMED
@@ -7,10 +7,10 @@ from pydantic import Field as FieldInfo
 from .model import Model
 from .._models import BaseModel
 
-__all__ = ["ModelListResponse"]
+__all__ = ["ModelList"]
 
 
-class ModelListResponse(BaseModel):
+class ModelList(BaseModel):
     data: List[Model]
 
     http_header: Dict[str, List[str]] = FieldInfo(alias="httpHeader")
relaxai/types/shared/__init__.py ADDED
@@ -0,0 +1,5 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .openai_usage import OpenAIUsage as OpenAIUsage
+from .openai_prompt_tokens_details import OpenAIPromptTokensDetails as OpenAIPromptTokensDetails
+from .openai_completion_tokens_details import OpenAICompletionTokensDetails as OpenAICompletionTokensDetails
relaxai/types/shared/openai_completion_tokens_details.py ADDED
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+
+__all__ = ["OpenAICompletionTokensDetails"]
+
+
+class OpenAICompletionTokensDetails(BaseModel):
+    accepted_prediction_tokens: int
+
+    audio_tokens: int
+
+    reasoning_tokens: int
+
+    rejected_prediction_tokens: int
relaxai/types/shared/openai_prompt_tokens_details.py ADDED
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+
+__all__ = ["OpenAIPromptTokensDetails"]
+
+
+class OpenAIPromptTokensDetails(BaseModel):
+    audio_tokens: int
+
+    cached_tokens: int
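
These shared models replace the removed `relaxai/types/usage.py`. `OpenAIUsage` itself is also new in this release, but its fields do not appear in this diff, so the sketch below sticks to the two detail models whose fields are shown:

```python
from relaxai.types.shared import (
    OpenAICompletionTokensDetails,
    OpenAIPromptTokensDetails,
)

completion_details = OpenAICompletionTokensDetails(
    accepted_prediction_tokens=0,
    audio_tokens=0,
    reasoning_tokens=42,
    rejected_prediction_tokens=0,
)
prompt_details = OpenAIPromptTokensDetails(audio_tokens=0, cached_tokens=128)
print(completion_details.reasoning_tokens, prompt_details.cached_tokens)
```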