relaxai 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of relaxai might be problematic. Click here for more details.

Files changed (50):
  1. relaxai/__init__.py +90 -0
  2. relaxai/_base_client.py +1985 -0
  3. relaxai/_client.py +437 -0
  4. relaxai/_compat.py +219 -0
  5. relaxai/_constants.py +14 -0
  6. relaxai/_exceptions.py +108 -0
  7. relaxai/_files.py +123 -0
  8. relaxai/_models.py +805 -0
  9. relaxai/_qs.py +150 -0
  10. relaxai/_resource.py +43 -0
  11. relaxai/_response.py +830 -0
  12. relaxai/_streaming.py +333 -0
  13. relaxai/_types.py +219 -0
  14. relaxai/_utils/__init__.py +57 -0
  15. relaxai/_utils/_logs.py +25 -0
  16. relaxai/_utils/_proxy.py +65 -0
  17. relaxai/_utils/_reflection.py +42 -0
  18. relaxai/_utils/_resources_proxy.py +24 -0
  19. relaxai/_utils/_streams.py +12 -0
  20. relaxai/_utils/_sync.py +86 -0
  21. relaxai/_utils/_transform.py +447 -0
  22. relaxai/_utils/_typing.py +151 -0
  23. relaxai/_utils/_utils.py +422 -0
  24. relaxai/_version.py +4 -0
  25. relaxai/lib/.keep +4 -0
  26. relaxai/py.typed +0 -0
  27. relaxai/resources/__init__.py +61 -0
  28. relaxai/resources/chat.py +285 -0
  29. relaxai/resources/embeddings.py +189 -0
  30. relaxai/resources/health.py +134 -0
  31. relaxai/resources/models.py +214 -0
  32. relaxai/types/__init__.py +18 -0
  33. relaxai/types/chat_completion_message.py +54 -0
  34. relaxai/types/chat_completion_message_param.py +55 -0
  35. relaxai/types/chat_create_completion_params.py +106 -0
  36. relaxai/types/chat_create_completion_response.py +79 -0
  37. relaxai/types/content_filter_results.py +57 -0
  38. relaxai/types/embedding_create_params.py +19 -0
  39. relaxai/types/embedding_create_response.py +30 -0
  40. relaxai/types/function_call.py +13 -0
  41. relaxai/types/function_call_param.py +13 -0
  42. relaxai/types/function_definition_param.py +17 -0
  43. relaxai/types/health_check_response.py +7 -0
  44. relaxai/types/model.py +53 -0
  45. relaxai/types/model_list_response.py +16 -0
  46. relaxai/types/usage.py +33 -0
  47. relaxai-0.0.1.dist-info/METADATA +484 -0
  48. relaxai-0.0.1.dist-info/RECORD +50 -0
  49. relaxai-0.0.1.dist-info/WHEEL +4 -0
  50. relaxai-0.0.1.dist-info/licenses/LICENSE +201 -0
@@ -0,0 +1,214 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ import httpx
6
+
7
+ from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
8
+ from .._compat import cached_property
9
+ from .._resource import SyncAPIResource, AsyncAPIResource
10
+ from .._response import (
11
+ to_raw_response_wrapper,
12
+ to_streamed_response_wrapper,
13
+ async_to_raw_response_wrapper,
14
+ async_to_streamed_response_wrapper,
15
+ )
16
+ from ..types.model import Model
17
+ from .._base_client import make_request_options
18
+ from ..types.model_list_response import ModelListResponse
19
+
20
+ __all__ = ["ModelsResource", "AsyncModelsResource"]
21
+
22
+
23
class ModelsResource(SyncAPIResource):
    """Synchronous operations on the `/v1/models` endpoints."""

    @cached_property
    def with_raw_response(self) -> ModelsResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/relax-ai/python-sdk#accessing-raw-response-data-eg-headers
        """
        return ModelsResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ModelsResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/relax-ai/python-sdk#with_streaming_response
        """
        return ModelsResourceWithStreamingResponse(self)

    def retrieve(
        self,
        model: str,
        *,
        # The extra_* arguments pass additional parameters to the API beyond the
        # named kwargs; they take precedence over values configured on the client.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Model:
        """
        Get the details of the given model

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # The model id becomes a path segment, so an empty value would hit the
        # wrong route; reject it up front.
        if not model:
            raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self._get(f"/v1/models/{model}", options=request_options, cast_to=Model)

    def list(
        self,
        *,
        # The extra_* arguments pass additional parameters to the API beyond the
        # named kwargs; they take precedence over values configured on the client.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ModelListResponse:
        """List all the available models"""
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self._get("/v1/models", options=request_options, cast_to=ModelListResponse)
94
+
95
+
96
class AsyncModelsResource(AsyncAPIResource):
    """Asynchronous operations on the `/v1/models` endpoints."""

    @cached_property
    def with_raw_response(self) -> AsyncModelsResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/relax-ai/python-sdk#accessing-raw-response-data-eg-headers
        """
        return AsyncModelsResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/relax-ai/python-sdk#with_streaming_response
        """
        return AsyncModelsResourceWithStreamingResponse(self)

    async def retrieve(
        self,
        model: str,
        *,
        # The extra_* arguments pass additional parameters to the API beyond the
        # named kwargs; they take precedence over values configured on the client.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Model:
        """
        Get the details of the given model

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # The model id becomes a path segment, so an empty value would hit the
        # wrong route; reject it up front.
        if not model:
            raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return await self._get(f"/v1/models/{model}", options=request_options, cast_to=Model)

    async def list(
        self,
        *,
        # The extra_* arguments pass additional parameters to the API beyond the
        # named kwargs; they take precedence over values configured on the client.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ModelListResponse:
        """List all the available models"""
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return await self._get("/v1/models", options=request_options, cast_to=ModelListResponse)
167
+
168
+
169
class ModelsResourceWithRawResponse:
    """Mirror of ``ModelsResource`` whose methods return the raw HTTP response."""

    def __init__(self, models: ModelsResource) -> None:
        self._models = models
        # Re-expose each method wrapped so it yields the unparsed response.
        self.retrieve = to_raw_response_wrapper(models.retrieve)
        self.list = to_raw_response_wrapper(models.list)
179
+
180
+
181
class AsyncModelsResourceWithRawResponse:
    """Mirror of ``AsyncModelsResource`` whose methods return the raw HTTP response."""

    def __init__(self, models: AsyncModelsResource) -> None:
        self._models = models
        # Re-expose each method wrapped so it yields the unparsed response.
        self.retrieve = async_to_raw_response_wrapper(models.retrieve)
        self.list = async_to_raw_response_wrapper(models.list)
191
+
192
+
193
class ModelsResourceWithStreamingResponse:
    """Mirror of ``ModelsResource`` whose methods stream the response body lazily."""

    def __init__(self, models: ModelsResource) -> None:
        self._models = models
        # Re-expose each method wrapped so the body is not read eagerly.
        self.retrieve = to_streamed_response_wrapper(models.retrieve)
        self.list = to_streamed_response_wrapper(models.list)
203
+
204
+
205
class AsyncModelsResourceWithStreamingResponse:
    """Mirror of ``AsyncModelsResource`` whose methods stream the response body lazily."""

    def __init__(self, models: AsyncModelsResource) -> None:
        self._models = models
        # Re-expose each method wrapped so the body is not read eagerly.
        self.retrieve = async_to_streamed_response_wrapper(models.retrieve)
        self.list = async_to_streamed_response_wrapper(models.list)
@@ -0,0 +1,18 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from .model import Model as Model
6
+ from .usage import Usage as Usage
7
+ from .function_call import FunctionCall as FunctionCall
8
+ from .function_call_param import FunctionCallParam as FunctionCallParam
9
+ from .model_list_response import ModelListResponse as ModelListResponse
10
+ from .health_check_response import HealthCheckResponse as HealthCheckResponse
11
+ from .content_filter_results import ContentFilterResults as ContentFilterResults
12
+ from .chat_completion_message import ChatCompletionMessage as ChatCompletionMessage
13
+ from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
14
+ from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse
15
+ from .function_definition_param import FunctionDefinitionParam as FunctionDefinitionParam
16
+ from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam
17
+ from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams
18
+ from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse
@@ -0,0 +1,54 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import List, Optional
4
+
5
+ from pydantic import Field as FieldInfo
6
+
7
+ from .._models import BaseModel
8
+ from .function_call import FunctionCall
9
+
10
+ __all__ = ["ChatCompletionMessage", "MultiContent", "MultiContentImageURL", "ToolCall"]
11
+
12
+
13
class MultiContentImageURL(BaseModel):
    """Image part of a multi-part message content entry (response side)."""

    # Requested/returned image detail level — presumably e.g. "low"/"high"; confirm with API docs.
    detail: Optional[str] = None

    # Location of the image — assumed URL or data URI; verify against the API reference.
    url: Optional[str] = None
17
+
18
+
19
class MultiContent(BaseModel):
    """One entry of a multi-part message content list (text or image)."""

    # Present when this part is an image.
    image_url: Optional[MultiContentImageURL] = None

    # Present when this part is plain text.
    text: Optional[str] = None

    # Discriminator for the part kind — presumably "text" or "image_url"; confirm with API docs.
    type: Optional[str] = None
25
+
26
+
27
class ToolCall(BaseModel):
    """A tool invocation requested by the model within a message."""

    # The function name/arguments the model wants invoked.
    function: FunctionCall

    # Kind of tool — commonly "function"; assumed, confirm against the API reference.
    type: str

    id: Optional[str] = None

    # Position of this call — presumably used when calls arrive in streamed chunks; verify.
    index: Optional[int] = None
35
+
36
+
37
class ChatCompletionMessage(BaseModel):
    """A single chat message as returned by the completions API."""

    # Serialized under the JSON key "MultiContent" (note the non-snake casing in the wire format).
    multi_content: List[MultiContent] = FieldInfo(alias="MultiContent")

    # Message author — presumably "system"/"user"/"assistant"/"tool"; confirm with API docs.
    role: str

    # Plain-text body; None when the content is multi-part or the message is a tool call.
    content: Optional[str] = None

    # Legacy single function call — assumed to mirror OpenAI-style semantics; verify.
    function_call: Optional[FunctionCall] = None

    name: Optional[str] = None

    # Model reasoning text, when the backend exposes it — TODO confirm semantics.
    reasoning_content: Optional[str] = None

    refusal: Optional[str] = None

    # ID of the tool call this message answers — assumed relevant for role "tool"; verify.
    tool_call_id: Optional[str] = None

    tool_calls: Optional[List[ToolCall]] = None
@@ -0,0 +1,55 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Iterable
6
+ from typing_extensions import Required, Annotated, TypedDict
7
+
8
+ from .._utils import PropertyInfo
9
+ from .function_call_param import FunctionCallParam
10
+
11
+ __all__ = ["ChatCompletionMessageParam", "MultiContent", "MultiContentImageURL", "ToolCall"]
12
+
13
+
14
class MultiContentImageURL(TypedDict, total=False):
    """Image part of a multi-part message content entry (request side)."""

    # Requested image detail level — presumably e.g. "low"/"high"; confirm with API docs.
    detail: str

    # Location of the image — assumed URL or data URI; verify against the API reference.
    url: str
18
+
19
+
20
class MultiContent(TypedDict, total=False):
    """One entry of a multi-part message content list (text or image)."""

    # Present when this part is an image.
    image_url: MultiContentImageURL

    # Present when this part is plain text.
    text: str

    # Discriminator for the part kind — presumably "text" or "image_url"; confirm with API docs.
    type: str
26
+
27
+
28
class ToolCall(TypedDict, total=False):
    """A tool invocation carried inside an outgoing message."""

    # The function name/arguments being invoked (required).
    function: Required[FunctionCallParam]

    # Kind of tool — commonly "function"; assumed, confirm against the API reference.
    type: Required[str]

    id: str

    # Position of this call — presumably used for streamed chunks; verify.
    index: int
36
+
37
+
38
class ChatCompletionMessageParam(TypedDict, total=False):
    """A single chat message to send to the completions API."""

    # Serialized under the JSON key "MultiContent" via the PropertyInfo alias
    # (note the non-snake casing in the wire format).
    multi_content: Required[Annotated[Iterable[MultiContent], PropertyInfo(alias="MultiContent")]]

    # Message author — presumably "system"/"user"/"assistant"/"tool"; confirm with API docs.
    role: Required[str]

    # Plain-text body; omit when supplying multi-part content instead.
    content: str

    # Legacy single function call — assumed to mirror OpenAI-style semantics; verify.
    function_call: FunctionCallParam

    name: str

    # Model reasoning text — TODO confirm whether the API accepts this on requests.
    reasoning_content: str

    refusal: str

    # ID of the tool call this message answers — assumed relevant for role "tool"; verify.
    tool_call_id: str

    tool_calls: Iterable[ToolCall]
@@ -0,0 +1,106 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Dict, List, Iterable
6
+ from typing_extensions import Required, TypedDict
7
+
8
+ from .function_definition_param import FunctionDefinitionParam
9
+ from .chat_completion_message_param import ChatCompletionMessageParam
10
+
11
+ __all__ = [
12
+ "ChatCreateCompletionParams",
13
+ "Prediction",
14
+ "ResponseFormat",
15
+ "ResponseFormatJsonSchema",
16
+ "StreamOptions",
17
+ "Tool",
18
+ ]
19
+
20
+
21
class ChatCreateCompletionParams(TypedDict, total=False):
    """Request body for creating a chat completion.

    Only ``messages`` and ``model`` are required; all other keys are optional
    tuning/sampling parameters forwarded to the API as-is.
    """

    # Conversation so far, oldest first.
    messages: Required[Iterable[ChatCompletionMessageParam]]

    # ID of the model to run the completion against.
    model: Required[str]

    # Opaque kwargs for the server-side chat template — presumably backend-specific; verify.
    chat_template_kwargs: object

    frequency_penalty: float

    # Legacy function-call control — untyped (string or object) in the spec.
    function_call: object

    functions: Iterable[FunctionDefinitionParam]

    # Token-id -> bias mapping — assumed to follow OpenAI logit_bias semantics; verify.
    logit_bias: Dict[str, int]

    logprobs: bool

    max_completion_tokens: int

    max_tokens: int

    metadata: Dict[str, str]

    # Number of choices to generate.
    n: int

    # Untyped (bool or object) in the spec.
    parallel_tool_calls: object

    prediction: Prediction

    presence_penalty: float

    reasoning_effort: str

    response_format: ResponseFormat

    seed: int

    stop: List[str]

    store: bool

    # When True the server streams chunks instead of a single response.
    stream: bool

    stream_options: StreamOptions

    temperature: float

    # Untyped (string or object) in the spec.
    tool_choice: object

    tools: Iterable[Tool]

    top_logprobs: int

    top_p: float

    # End-user identifier — presumably for abuse monitoring; verify.
    user: str
77
+
78
+
79
class Prediction(TypedDict, total=False):
    """Predicted-output hint supplied with the request."""

    # The predicted text (required).
    content: Required[str]

    # Kind of prediction — presumably "content"; confirm with API docs.
    type: Required[str]
83
+
84
+
85
class ResponseFormatJsonSchema(TypedDict, total=False):
    """JSON-schema constraint used when ``response_format.type`` selects schema output."""

    name: Required[str]

    # Whether the model must adhere strictly to the schema (required).
    strict: Required[bool]

    description: str
91
+
92
+
93
class ResponseFormat(TypedDict, total=False):
    """Desired output format for the completion."""

    # Schema details — presumably used when type is "json_schema"; confirm with API docs.
    json_schema: ResponseFormatJsonSchema

    # Format discriminator — presumably "text"/"json_object"/"json_schema"; verify.
    type: str
97
+
98
+
99
class StreamOptions(TypedDict, total=False):
    """Options that only apply when ``stream`` is True."""

    # Whether a final usage chunk is emitted — assumed; confirm with API docs.
    include_usage: bool
101
+
102
+
103
class Tool(TypedDict, total=False):
    """A tool the model may call during the completion."""

    # Tool kind — commonly "function"; assumed, confirm with API docs.
    type: Required[str]

    function: FunctionDefinitionParam
@@ -0,0 +1,79 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import Dict, List, Optional
4
+
5
+ from pydantic import Field as FieldInfo
6
+
7
+ from .usage import Usage
8
+ from .._models import BaseModel
9
+ from .content_filter_results import ContentFilterResults
10
+ from .chat_completion_message import ChatCompletionMessage
11
+
12
+ __all__ = [
13
+ "ChatCreateCompletionResponse",
14
+ "Choice",
15
+ "ChoiceLogprobs",
16
+ "ChoiceLogprobsContent",
17
+ "ChoiceLogprobsContentTopLogprob",
18
+ "PromptFilterResult",
19
+ ]
20
+
21
+
22
class ChoiceLogprobsContentTopLogprob(BaseModel):
    """One alternative token candidate with its log probability."""

    token: str

    logprob: float

    # Raw byte representation of the token — typed as a string here by the spec; verify encoding.
    bytes: Optional[str] = None
28
+
29
+
30
class ChoiceLogprobsContent(BaseModel):
    """Log-probability record for a single generated token."""

    token: str

    logprob: float

    # Most likely alternatives at this position.
    top_logprobs: List[ChoiceLogprobsContentTopLogprob]

    # Raw byte representation of the token — typed as a string here by the spec; verify encoding.
    bytes: Optional[str] = None
38
+
39
+
40
class ChoiceLogprobs(BaseModel):
    """Per-token log probabilities for one choice."""

    content: List[ChoiceLogprobsContent]
42
+
43
+
44
class Choice(BaseModel):
    """One generated completion alternative."""

    # Why generation stopped — presumably "stop"/"length"/"tool_calls"; confirm with API docs.
    finish_reason: str

    # Index of this choice within the response's choices list.
    index: int

    message: ChatCompletionMessage

    # Moderation verdicts for this choice, when the backend provides them.
    content_filter_results: Optional[ContentFilterResults] = None

    # Present only when logprobs were requested — assumed; verify.
    logprobs: Optional[ChoiceLogprobs] = None
54
+
55
+
56
class PromptFilterResult(BaseModel):
    """Moderation verdict for one input prompt."""

    # Index of the prompt this verdict applies to.
    index: int

    content_filter_results: Optional[ContentFilterResults] = None
60
+
61
+
62
class ChatCreateCompletionResponse(BaseModel):
    """Full (non-streaming) response from the chat completions endpoint."""

    id: str

    # One entry per requested completion (`n`).
    choices: List[Choice]

    # Creation time — presumably a Unix timestamp in seconds; confirm with API docs.
    created: int

    # Response headers echoed into the body; serialized under the JSON key "httpHeader".
    http_header: Dict[str, List[str]] = FieldInfo(alias="httpHeader")

    model: str

    # Object type tag — presumably "chat.completion"; verify.
    object: str

    system_fingerprint: str

    # Token accounting for the request.
    usage: Usage

    prompt_filter_results: Optional[List[PromptFilterResult]] = None
@@ -0,0 +1,57 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import Optional
4
+
5
+ from .._models import BaseModel
6
+
7
+ __all__ = ["ContentFilterResults", "Hate", "Jailbreak", "Profanity", "SelfHarm", "Sexual", "Violence"]
8
+
9
+
10
class Hate(BaseModel):
    """Content-filter verdict for the hate category."""

    # True when content was filtered under this category.
    filtered: bool

    # Severity label — presumably e.g. "safe"/"low"/"medium"/"high"; confirm with API docs.
    severity: Optional[str] = None
14
+
15
+
16
class Jailbreak(BaseModel):
    """Content-filter verdict for jailbreak attempts (detection-style, no severity)."""

    # True when a jailbreak attempt was detected.
    detected: bool

    # True when content was filtered as a result.
    filtered: bool
20
+
21
+
22
class Profanity(BaseModel):
    """Content-filter verdict for profanity (detection-style, no severity)."""

    # True when profanity was detected.
    detected: bool

    # True when content was filtered as a result.
    filtered: bool
26
+
27
+
28
class SelfHarm(BaseModel):
    """Content-filter verdict for the self-harm category."""

    # True when content was filtered under this category.
    filtered: bool

    # Severity label — presumably e.g. "safe"/"low"/"medium"/"high"; confirm with API docs.
    severity: Optional[str] = None
32
+
33
+
34
class Sexual(BaseModel):
    """Content-filter verdict for the sexual-content category."""

    # True when content was filtered under this category.
    filtered: bool

    # Severity label — presumably e.g. "safe"/"low"/"medium"/"high"; confirm with API docs.
    severity: Optional[str] = None
38
+
39
+
40
class Violence(BaseModel):
    """Content-filter verdict for the violence category."""

    # True when content was filtered under this category.
    filtered: bool

    # Severity label — presumably e.g. "safe"/"low"/"medium"/"high"; confirm with API docs.
    severity: Optional[str] = None
44
+
45
+
46
class ContentFilterResults(BaseModel):
    """Per-category moderation verdicts attached to prompts and choices."""

    hate: Optional[Hate] = None

    jailbreak: Optional[Jailbreak] = None

    profanity: Optional[Profanity] = None

    self_harm: Optional[SelfHarm] = None

    sexual: Optional[Sexual] = None

    violence: Optional[Violence] = None
@@ -0,0 +1,19 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Required, TypedDict
6
+
7
+ __all__ = ["EmbeddingCreateParams"]
8
+
9
+
10
class EmbeddingCreateParams(TypedDict, total=False):
    """Request body for creating embeddings."""

    # Text(s) to embed — untyped (string or array) in the spec, hence `object`.
    input: Required[object]

    # ID of the embedding model to use.
    model: Required[str]

    # Requested output dimensionality — assumed to truncate/resize the vector; verify.
    dimensions: int

    # Encoding of the returned vectors — presumably "float" or "base64"; confirm with API docs.
    encoding_format: str

    # End-user identifier — presumably for abuse monitoring; verify.
    user: str
@@ -0,0 +1,30 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import Dict, List
4
+
5
+ from pydantic import Field as FieldInfo
6
+
7
+ from .usage import Usage
8
+ from .._models import BaseModel
9
+
10
+ __all__ = ["EmbeddingCreateResponse", "Data"]
11
+
12
+
13
class Data(BaseModel):
    """One embedding vector in the response."""

    # The embedding values.
    embedding: List[float]

    # Index of the corresponding input.
    index: int

    # Object type tag — presumably "embedding"; verify.
    object: str
19
+
20
+
21
class EmbeddingCreateResponse(BaseModel):
    """Response from the embeddings endpoint."""

    # One entry per input, each carrying its vector.
    data: List[Data]

    # Response headers echoed into the body; serialized under the JSON key "httpHeader".
    http_header: Dict[str, List[str]] = FieldInfo(alias="httpHeader")

    model: str

    # Object type tag — presumably "list"; verify.
    object: str

    # Token accounting for the request.
    usage: Usage
@@ -0,0 +1,13 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import Optional
4
+
5
+ from .._models import BaseModel
6
+
7
+ __all__ = ["FunctionCall"]
8
+
9
+
10
class FunctionCall(BaseModel):
    """A function invocation emitted by the model."""

    # JSON-encoded arguments — assumed; the model may produce invalid JSON, verify before parsing.
    arguments: Optional[str] = None

    # Name of the function to call.
    name: Optional[str] = None
@@ -0,0 +1,13 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import TypedDict
6
+
7
+ __all__ = ["FunctionCallParam"]
8
+
9
+
10
class FunctionCallParam(TypedDict, total=False):
    """A function invocation supplied in an outgoing message."""

    # JSON-encoded arguments — assumed; verify the expected encoding with the API docs.
    arguments: str

    # Name of the function being called.
    name: str
@@ -0,0 +1,17 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Required, TypedDict
6
+
7
+ __all__ = ["FunctionDefinitionParam"]
8
+
9
+
10
+ class FunctionDefinitionParam(TypedDict, total=False):
11
+ name: Required[str]
12
+
13
+ parameters: Required[object]
14
+
15
+ description: str
16
+
17
+ strict: bool
@@ -0,0 +1,7 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing_extensions import TypeAlias
4
+
5
+ __all__ = ["HealthCheckResponse"]
6
+
# The health-check endpoint responds with a plain string payload, so the
# response type is just an alias for `str`.
HealthCheckResponse: TypeAlias = str