relaxai 0.1.0__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (31)
  1. relaxai/_base_client.py +4 -1
  2. relaxai/_client.py +77 -40
  3. relaxai/_files.py +4 -4
  4. relaxai/_models.py +1 -1
  5. relaxai/_version.py +1 -1
  6. relaxai/resources/__init__.py +0 -14
  7. relaxai/resources/chat.py +46 -21
  8. relaxai/resources/embeddings.py +18 -18
  9. relaxai/resources/models.py +57 -57
  10. relaxai/types/__init__.py +11 -6
  11. relaxai/types/chat_completion_message.py +29 -3
  12. relaxai/types/chat_completion_message_param.py +29 -4
  13. relaxai/types/{chat_create_completion_response.py → chat_completion_response.py} +4 -4
  14. relaxai/types/chat_create_completion_params.py +62 -10
  15. relaxai/types/{embedding_create_params.py → embedding_create_embedding_params.py} +2 -2
  16. relaxai/types/{embedding_create_response.py → embedding_response.py} +4 -4
  17. relaxai/types/function_definition_param.py +6 -2
  18. relaxai/types/{health_check_response.py → health_response.py} +2 -2
  19. relaxai/types/{model_list_response.py → model_list.py} +2 -2
  20. relaxai/types/shared/__init__.py +5 -0
  21. relaxai/types/shared/openai_completion_tokens_details.py +15 -0
  22. relaxai/types/shared/openai_prompt_tokens_details.py +11 -0
  23. relaxai/types/shared/openai_usage.py +19 -0
  24. relaxai/types/stream_options_param.py +11 -0
  25. {relaxai-0.1.0.dist-info → relaxai-0.2.1.dist-info}/METADATA +44 -39
  26. relaxai-0.2.1.dist-info/RECORD +53 -0
  27. relaxai/resources/health.py +0 -134
  28. relaxai/types/usage.py +0 -33
  29. relaxai-0.1.0.dist-info/RECORD +0 -50
  30. {relaxai-0.1.0.dist-info → relaxai-0.2.1.dist-info}/WHEEL +0 -0
  31. {relaxai-0.1.0.dist-info → relaxai-0.2.1.dist-info}/licenses/LICENSE +0 -0
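
Taken together, the renames in the list above follow one pattern: response types drop the method-derived suffix (ModelListResponse → ModelList, EmbeddingCreateResponse → EmbeddingResponse, HealthCheckResponse → HealthResponse) and resource methods gain explicit nouns (list → list_models, retrieve → retrieve_model). A minimal migration sketch, assuming a Stainless-style `Relaxai` client class (the client class and its constructor arguments are not shown in this diff):

```python
# Hypothetical migration sketch; `Relaxai` and its `api_key` argument are
# assumptions based on the usual Stainless client layout, not this diff.
from relaxai import Relaxai

client = Relaxai(api_key="...")

models = client.models.list_models()        # 0.1.0: client.models.list()
model = client.models.retrieve_model("id")  # 0.1.0: client.models.retrieve("id")
```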
relaxai/resources/models.py CHANGED
@@ -15,7 +15,7 @@ from .._response import (
 )
 from ..types.model import Model
 from .._base_client import make_request_options
-from ..types.model_list_response import ModelListResponse
+from ..types.model_list import ModelList
 
 __all__ = ["ModelsResource", "AsyncModelsResource"]
 
@@ -40,7 +40,26 @@ class ModelsResource(SyncAPIResource):
         """
         return ModelsResourceWithStreamingResponse(self)
 
-    def retrieve(
+    def list_models(
+        self,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ModelList:
+        """List all the available models"""
+        return self._get(
+            "/v1/models",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ModelList,
+        )
+
+    def retrieve_model(
         self,
         model: str,
         *,
@@ -73,25 +92,6 @@ class ModelsResource(SyncAPIResource):
             cast_to=Model,
         )
 
-    def list(
-        self,
-        *,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> ModelListResponse:
-        """List all the available models"""
-        return self._get(
-            "/v1/models",
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=ModelListResponse,
-        )
-
 
 class AsyncModelsResource(AsyncAPIResource):
     @cached_property
@@ -113,7 +113,26 @@ class AsyncModelsResource(AsyncAPIResource):
         """
         return AsyncModelsResourceWithStreamingResponse(self)
 
-    async def retrieve(
+    async def list_models(
+        self,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ModelList:
+        """List all the available models"""
+        return await self._get(
+            "/v1/models",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ModelList,
+        )
+
+    async def retrieve_model(
         self,
         model: str,
         *,
@@ -146,35 +165,16 @@ class AsyncModelsResource(AsyncAPIResource):
             cast_to=Model,
         )
 
-    async def list(
-        self,
-        *,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> ModelListResponse:
-        """List all the available models"""
-        return await self._get(
-            "/v1/models",
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=ModelListResponse,
-        )
-
 
 class ModelsResourceWithRawResponse:
     def __init__(self, models: ModelsResource) -> None:
         self._models = models
 
-        self.retrieve = to_raw_response_wrapper(
-            models.retrieve,
+        self.list_models = to_raw_response_wrapper(
+            models.list_models,
         )
-        self.list = to_raw_response_wrapper(
-            models.list,
+        self.retrieve_model = to_raw_response_wrapper(
+            models.retrieve_model,
         )
 
 
@@ -182,11 +182,11 @@ class AsyncModelsResourceWithRawResponse:
     def __init__(self, models: AsyncModelsResource) -> None:
         self._models = models
 
-        self.retrieve = async_to_raw_response_wrapper(
-            models.retrieve,
+        self.list_models = async_to_raw_response_wrapper(
+            models.list_models,
        )
-        self.list = async_to_raw_response_wrapper(
-            models.list,
+        self.retrieve_model = async_to_raw_response_wrapper(
+            models.retrieve_model,
        )
 
 
@@ -194,11 +194,11 @@ class ModelsResourceWithStreamingResponse:
     def __init__(self, models: ModelsResource) -> None:
         self._models = models
 
-        self.retrieve = to_streamed_response_wrapper(
-            models.retrieve,
+        self.list_models = to_streamed_response_wrapper(
+            models.list_models,
        )
-        self.list = to_streamed_response_wrapper(
-            models.list,
+        self.retrieve_model = to_streamed_response_wrapper(
+            models.retrieve_model,
        )
 
 
@@ -206,9 +206,9 @@ class AsyncModelsResourceWithStreamingResponse:
     def __init__(self, models: AsyncModelsResource) -> None:
         self._models = models
 
-        self.retrieve = async_to_streamed_response_wrapper(
-            models.retrieve,
+        self.list_models = async_to_streamed_response_wrapper(
+            models.list_models,
        )
-        self.list = async_to_streamed_response_wrapper(
-            models.list,
+        self.retrieve_model = async_to_streamed_response_wrapper(
+            models.retrieve_model,
        )
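
The raw- and streamed-response wrappers track the renames one-to-one, so callers that go through them change the same way. A sketch, assuming the conventional Stainless `with_raw_response` accessor and `.parse()` method (conventions, not shown in this diff):

```python
from relaxai import Relaxai  # client class name assumed, as above

client = Relaxai(api_key="...")
raw = client.models.with_raw_response.list_models()  # wrapper renamed with the method
model_list = raw.parse()       # parsed into the renamed ModelList type
print(model_list.http_header)  # ModelList keeps its httpHeader-aliased field
```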
relaxai/types/__init__.py CHANGED
@@ -3,16 +3,21 @@
 from __future__ import annotations
 
 from .model import Model as Model
-from .usage import Usage as Usage
+from .shared import (
+    OpenAIUsage as OpenAIUsage,
+    OpenAIPromptTokensDetails as OpenAIPromptTokensDetails,
+    OpenAICompletionTokensDetails as OpenAICompletionTokensDetails,
+)
+from .model_list import ModelList as ModelList
 from .function_call import FunctionCall as FunctionCall
+from .health_response import HealthResponse as HealthResponse
+from .embedding_response import EmbeddingResponse as EmbeddingResponse
 from .function_call_param import FunctionCallParam as FunctionCallParam
-from .model_list_response import ModelListResponse as ModelListResponse
-from .health_check_response import HealthCheckResponse as HealthCheckResponse
+from .stream_options_param import StreamOptionsParam as StreamOptionsParam
 from .content_filter_results import ContentFilterResults as ContentFilterResults
 from .chat_completion_message import ChatCompletionMessage as ChatCompletionMessage
-from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
-from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse
+from .chat_completion_response import ChatCompletionResponse as ChatCompletionResponse
 from .function_definition_param import FunctionDefinitionParam as FunctionDefinitionParam
 from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam
 from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams
-from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse
+from .embedding_create_embedding_params import EmbeddingCreateEmbeddingParams as EmbeddingCreateEmbeddingParams
relaxai/types/chat_completion_message.py CHANGED
@@ -1,13 +1,37 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 from typing import List, Optional
+from typing_extensions import Literal
 
 from pydantic import Field as FieldInfo
 
 from .._models import BaseModel
 from .function_call import FunctionCall
 
-__all__ = ["ChatCompletionMessage", "MultiContent", "MultiContentImageURL", "ToolCall"]
+__all__ = [
+    "ChatCompletionMessage",
+    "Annotation",
+    "AnnotationURLCitation",
+    "MultiContent",
+    "MultiContentImageURL",
+    "ToolCall",
+]
+
+
+class AnnotationURLCitation(BaseModel):
+    end_index: int
+
+    start_index: int
+
+    title: str
+
+    url: str
+
+
+class Annotation(BaseModel):
+    type: Optional[Literal["url_citation"]] = None
+
+    url_citation: Optional[AnnotationURLCitation] = None
 
 
 class MultiContentImageURL(BaseModel):
@@ -35,14 +59,16 @@ class ToolCall(BaseModel):
 
 
 class ChatCompletionMessage(BaseModel):
-    multi_content: List[MultiContent] = FieldInfo(alias="MultiContent")
+    content: str
 
     role: str
 
-    content: Optional[str] = None
+    annotations: Optional[List[Annotation]] = None
 
     function_call: Optional[FunctionCall] = None
 
+    multi_content: Optional[List[MultiContent]] = FieldInfo(alias="MultiContent", default=None)
+
     name: Optional[str] = None
 
     reasoning_content: Optional[str] = None
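
The message model flips its required fields: `content` (previously Optional) is now required, `multi_content` becomes optional, and URL-citation annotations arrive. A small sketch of reading annotations off a parsed message, assuming these models validate like ordinary pydantic models:

```python
from relaxai.types import ChatCompletionMessage

# Nested dicts are coerced into Annotation/AnnotationURLCitation on validation.
msg = ChatCompletionMessage(
    content="RelaxAI hosts inference models.",  # required as of 0.2.1
    role="assistant",
    annotations=[{
        "type": "url_citation",
        "url_citation": {"start_index": 0, "end_index": 7,
                         "title": "Example", "url": "https://example.com"},
    }],
)
for ann in msg.annotations or []:
    if ann.type == "url_citation" and ann.url_citation is not None:
        print(ann.url_citation.url)
```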
relaxai/types/chat_completion_message_param.py CHANGED
@@ -3,12 +3,35 @@
 from __future__ import annotations
 
 from typing import Iterable
-from typing_extensions import Required, Annotated, TypedDict
+from typing_extensions import Literal, Required, Annotated, TypedDict
 
 from .._utils import PropertyInfo
 from .function_call_param import FunctionCallParam
 
-__all__ = ["ChatCompletionMessageParam", "MultiContent", "MultiContentImageURL", "ToolCall"]
+__all__ = [
+    "ChatCompletionMessageParam",
+    "Annotation",
+    "AnnotationURLCitation",
+    "MultiContent",
+    "MultiContentImageURL",
+    "ToolCall",
+]
+
+
+class AnnotationURLCitation(TypedDict, total=False):
+    end_index: Required[int]
+
+    start_index: Required[int]
+
+    title: Required[str]
+
+    url: Required[str]
+
+
+class Annotation(TypedDict, total=False):
+    type: Literal["url_citation"]
+
+    url_citation: AnnotationURLCitation
 
 
 class MultiContentImageURL(TypedDict, total=False):
@@ -36,14 +59,16 @@ class ToolCall(TypedDict, total=False):
 
 
 class ChatCompletionMessageParam(TypedDict, total=False):
-    multi_content: Required[Annotated[Iterable[MultiContent], PropertyInfo(alias="MultiContent")]]
+    content: Required[str]
 
     role: Required[str]
 
-    content: str
+    annotations: Iterable[Annotation]
 
     function_call: FunctionCallParam
 
+    multi_content: Annotated[Iterable[MultiContent], PropertyInfo(alias="MultiContent")]
+
     name: str
 
     reasoning_content: str
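
On the request side the same flip applies, and because `ChatCompletionMessageParam` is a TypedDict the change is purely static: a plain dict without `"content"` now fails type checking but still runs. A minimal example:

```python
from relaxai.types import ChatCompletionMessageParam

# `content` is Required as of 0.2.1; `multi_content` is now optional.
message: ChatCompletionMessageParam = {
    "role": "user",
    "content": "Summarize the 0.2.1 changes.",
}
```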
relaxai/types/{chat_create_completion_response.py → chat_completion_response.py} RENAMED
@@ -4,13 +4,13 @@ from typing import Dict, List, Optional
 
 from pydantic import Field as FieldInfo
 
-from .usage import Usage
 from .._models import BaseModel
+from .shared.openai_usage import OpenAIUsage
 from .content_filter_results import ContentFilterResults
 from .chat_completion_message import ChatCompletionMessage
 
 __all__ = [
-    "ChatCreateCompletionResponse",
+    "ChatCompletionResponse",
     "Choice",
     "ChoiceLogprobs",
     "ChoiceLogprobsContent",
@@ -59,7 +59,7 @@ class PromptFilterResult(BaseModel):
     content_filter_results: Optional[ContentFilterResults] = None
 
 
-class ChatCreateCompletionResponse(BaseModel):
+class ChatCompletionResponse(BaseModel):
     id: str
 
     choices: List[Choice]
@@ -74,6 +74,6 @@ class ChatCreateCompletionResponse(BaseModel):
 
     system_fingerprint: str
 
-    usage: Usage
+    usage: OpenAIUsage
 
     prompt_filter_results: Optional[List[PromptFilterResult]] = None
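
For the renamed response type, only imports and annotations need updating; the shape is unchanged apart from `usage` moving to the shared `OpenAIUsage` model. A sketch:

```python
from relaxai.types import ChatCompletionResponse  # 0.1.0: ChatCreateCompletionResponse

def summarize(resp: ChatCompletionResponse) -> str:
    # `usage` is now the shared OpenAIUsage rather than the removed Usage type
    return f"{len(resp.choices)} choice(s), {resp.usage.total_tokens} tokens"
```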
relaxai/types/chat_create_completion_params.py CHANGED
@@ -3,18 +3,26 @@
 from __future__ import annotations
 
 from typing import Dict, List, Iterable
-from typing_extensions import Required, TypedDict
+from typing_extensions import Required, Annotated, TypedDict
 
+from .._utils import PropertyInfo
+from .stream_options_param import StreamOptionsParam
 from .function_definition_param import FunctionDefinitionParam
 from .chat_completion_message_param import ChatCompletionMessageParam
 
 __all__ = [
     "ChatCreateCompletionParams",
+    "ChatTemplateKwargs",
+    "FunctionCall",
+    "ParallelToolCalls",
     "Prediction",
     "ResponseFormat",
     "ResponseFormatJsonSchema",
-    "StreamOptions",
+    "ToolChoice",
     "Tool",
+    "WebSearchOptions",
+    "WebSearchOptionsUserLocation",
+    "WebSearchOptionsUserLocationApproximate",
 ]
 
 
@@ -23,11 +31,17 @@ class ChatCreateCompletionParams(TypedDict, total=False):
 
     model: Required[str]
 
-    chat_template_kwargs: object
+    query_stream: Annotated[bool, PropertyInfo(alias="stream")]
+    """If true, server responds as an SSE stream.
+
+    Generators may produce an ergonomic streaming method when this is set.
+    """
+
+    chat_template_kwargs: ChatTemplateKwargs
 
     frequency_penalty: float
 
-    function_call: object
+    function_call: FunctionCall
 
     functions: Iterable[FunctionDefinitionParam]
 
@@ -43,7 +57,7 @@ class ChatCreateCompletionParams(TypedDict, total=False):
 
     n: int
 
-    parallel_tool_calls: object
+    parallel_tool_calls: ParallelToolCalls
 
     prediction: Prediction
 
@@ -59,13 +73,13 @@ class ChatCreateCompletionParams(TypedDict, total=False):
 
     store: bool
 
-    stream: bool
+    body_stream: Annotated[bool, PropertyInfo(alias="stream")]
 
-    stream_options: StreamOptions
+    stream_options: StreamOptionsParam
 
     temperature: float
 
-    tool_choice: object
+    tool_choice: ToolChoice
 
     tools: Iterable[Tool]
 
@@ -75,6 +89,20 @@ class ChatCreateCompletionParams(TypedDict, total=False):
 
     user: str
 
+    web_search_options: WebSearchOptions
+
+
+class ChatTemplateKwargs(TypedDict, total=False):
+    pass
+
+
+class FunctionCall(TypedDict, total=False):
+    pass
+
+
+class ParallelToolCalls(TypedDict, total=False):
+    pass
+
 
 class Prediction(TypedDict, total=False):
     content: Required[str]
@@ -96,11 +124,35 @@ class ResponseFormat(TypedDict, total=False):
     type: str
 
 
-class StreamOptions(TypedDict, total=False):
-    include_usage: bool
+class ToolChoice(TypedDict, total=False):
+    pass
 
 
 class Tool(TypedDict, total=False):
     type: Required[str]
 
     function: FunctionDefinitionParam
+
+
+class WebSearchOptionsUserLocationApproximate(TypedDict, total=False):
+    city: str
+
+    country: str
+
+    latitude: float
+
+    longitude: float
+
+    state: str
+
+
+class WebSearchOptionsUserLocation(TypedDict, total=False):
+    approximate: WebSearchOptionsUserLocationApproximate
+
+    type: str
+
+
+class WebSearchOptions(TypedDict, total=False):
+    search_context_size: int
+
+    user_location: WebSearchOptionsUserLocation
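
Note that `query_stream` and `body_stream` both carry the wire alias `stream`, which looks like a spec quirk where the parameter is declared in both the query string and the request body; callers should set only one. A hedged sketch of the expanded params (the `messages` key is assumed from the imported `ChatCompletionMessageParam`, since its declaration falls outside these hunks):

```python
from relaxai.types import ChatCreateCompletionParams

params: ChatCreateCompletionParams = {
    "model": "placeholder-model",                     # hypothetical model id
    "messages": [{"role": "user", "content": "hi"}],  # assumed Required field
    "body_stream": True,                              # serialized as "stream"
    "stream_options": {"include_usage": True},        # shared StreamOptionsParam
    "web_search_options": {
        "search_context_size": 3,
        "user_location": {
            "type": "approximate",
            "approximate": {"city": "Berlin", "country": "DE"},
        },
    },
}
```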
relaxai/types/{embedding_create_params.py → embedding_create_embedding_params.py} RENAMED
@@ -4,10 +4,10 @@ from __future__ import annotations
 
 from typing_extensions import Required, TypedDict
 
-__all__ = ["EmbeddingCreateParams"]
+__all__ = ["EmbeddingCreateEmbeddingParams"]
 
 
-class EmbeddingCreateParams(TypedDict, total=False):
+class EmbeddingCreateEmbeddingParams(TypedDict, total=False):
     input: Required[object]
 
     model: Required[str]
relaxai/types/{embedding_create_response.py → embedding_response.py} RENAMED
@@ -4,10 +4,10 @@ from typing import Dict, List
 
 from pydantic import Field as FieldInfo
 
-from .usage import Usage
 from .._models import BaseModel
+from .shared.openai_usage import OpenAIUsage
 
-__all__ = ["EmbeddingCreateResponse", "Data"]
+__all__ = ["EmbeddingResponse", "Data"]
 
 
 class Data(BaseModel):
@@ -18,7 +18,7 @@ class Data(BaseModel):
 
     object: str
 
 
-class EmbeddingCreateResponse(BaseModel):
+class EmbeddingResponse(BaseModel):
     data: List[Data]
 
     http_header: Dict[str, List[str]] = FieldInfo(alias="httpHeader")
@@ -27,4 +27,4 @@ class EmbeddingCreateResponse(BaseModel):
 
     object: str
 
-    usage: Usage
+    usage: OpenAIUsage
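
Code that inspected embedding usage only needs the import rename; the field layout is otherwise compatible. A sketch:

```python
from relaxai.types import EmbeddingResponse  # 0.1.0: EmbeddingCreateResponse

def prompt_tokens_used(resp: EmbeddingResponse) -> int:
    return resp.usage.prompt_tokens  # usage is now the shared OpenAIUsage model
```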
relaxai/types/function_definition_param.py CHANGED
@@ -4,13 +4,17 @@ from __future__ import annotations
 
 from typing_extensions import Required, TypedDict
 
-__all__ = ["FunctionDefinitionParam"]
+__all__ = ["FunctionDefinitionParam", "Parameters"]
+
+
+class Parameters(TypedDict, total=False):
+    pass
 
 
 class FunctionDefinitionParam(TypedDict, total=False):
     name: Required[str]
 
-    parameters: Required[object]
+    parameters: Required[Parameters]
 
     description: str
 
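Since `Parameters` is an empty `total=False` TypedDict, it declares no statically known keys; the free-form JSON-schema mapping that satisfied the old bare `object` annotation still works at runtime, but a type checker now wants it cast. A sketch:

```python
from typing import Any, cast

from relaxai.types import FunctionDefinitionParam
from relaxai.types.function_definition_param import Parameters

schema: dict[str, Any] = {"type": "object", "properties": {"city": {"type": "string"}}}

tool_fn: FunctionDefinitionParam = {
    "name": "get_weather",
    "description": "Look up current weather",
    # Parameters declares no keys, so a free-form schema needs a cast to satisfy
    # static checkers; at runtime it is still just a dict.
    "parameters": cast(Parameters, schema),
}
```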
relaxai/types/{health_check_response.py → health_response.py} RENAMED
@@ -2,6 +2,6 @@
 
 from typing_extensions import TypeAlias
 
-__all__ = ["HealthCheckResponse"]
+__all__ = ["HealthResponse"]
 
-HealthCheckResponse: TypeAlias = str
+HealthResponse: TypeAlias = str
relaxai/types/{model_list_response.py → model_list.py} RENAMED
@@ -7,10 +7,10 @@ from pydantic import Field as FieldInfo
 from .model import Model
 from .._models import BaseModel
 
-__all__ = ["ModelListResponse"]
+__all__ = ["ModelList"]
 
 
-class ModelListResponse(BaseModel):
+class ModelList(BaseModel):
     data: List[Model]
 
     http_header: Dict[str, List[str]] = FieldInfo(alias="httpHeader")
relaxai/types/shared/__init__.py ADDED
@@ -0,0 +1,5 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .openai_usage import OpenAIUsage as OpenAIUsage
+from .openai_prompt_tokens_details import OpenAIPromptTokensDetails as OpenAIPromptTokensDetails
+from .openai_completion_tokens_details import OpenAICompletionTokensDetails as OpenAICompletionTokensDetails
relaxai/types/shared/openai_completion_tokens_details.py ADDED
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+
+__all__ = ["OpenAICompletionTokensDetails"]
+
+
+class OpenAICompletionTokensDetails(BaseModel):
+    accepted_prediction_tokens: int
+
+    audio_tokens: int
+
+    reasoning_tokens: int
+
+    rejected_prediction_tokens: int
relaxai/types/shared/openai_prompt_tokens_details.py ADDED
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+
+__all__ = ["OpenAIPromptTokensDetails"]
+
+
+class OpenAIPromptTokensDetails(BaseModel):
+    audio_tokens: int
+
+    cached_tokens: int
relaxai/types/shared/openai_usage.py ADDED
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+from .openai_prompt_tokens_details import OpenAIPromptTokensDetails
+from .openai_completion_tokens_details import OpenAICompletionTokensDetails
+
+__all__ = ["OpenAIUsage"]
+
+
+class OpenAIUsage(BaseModel):
+    completion_tokens: int
+
+    completion_tokens_details: OpenAICompletionTokensDetails
+
+    prompt_tokens: int
+
+    prompt_tokens_details: OpenAIPromptTokensDetails
+
+    total_tokens: int
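
The shared usage model breaks token counts down by category; everything below reads directly off fields shown in this file:

```python
from relaxai.types.shared import OpenAIUsage

def describe(usage: OpenAIUsage) -> str:
    d = usage.completion_tokens_details
    return (
        f"{usage.total_tokens} total tokens "
        f"({usage.prompt_tokens} prompt, {usage.completion_tokens} completion; "
        f"{d.reasoning_tokens} reasoning, {d.audio_tokens} audio)"
    )
```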
relaxai/types/stream_options_param.py ADDED
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["StreamOptionsParam"]
+
+
+class StreamOptionsParam(TypedDict, total=False):
+    include_usage: bool
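
`StreamOptionsParam` is the promoted, shared form of the inline `StreamOptions` removed from the params module above. Under the OpenAI streaming convention, `include_usage` asks the server to append a final stream chunk carrying the usage object; whether relaxai honors that convention is an assumption here:

```python
from relaxai.types import StreamOptionsParam

# Hedged: the final-chunk usage behavior mirrors the OpenAI streaming
# convention and is not confirmed by this diff.
stream_options: StreamOptionsParam = {"include_usage": True}
```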