relaxai 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,18 +16,20 @@ from ._utils import (
     lru_cache,
     is_mapping,
     is_iterable,
+    is_sequence,
 )
 from .._files import is_base64_file_input
+from ._compat import get_origin, is_typeddict
 from ._typing import (
     is_list_type,
     is_union_type,
     extract_type_arg,
     is_iterable_type,
     is_required_type,
+    is_sequence_type,
     is_annotated_type,
     strip_annotated_type,
 )
-from .._compat import get_origin, model_dump, is_typeddict
 
 _T = TypeVar("_T")
 
@@ -167,6 +169,8 @@ def _transform_recursive(
 
     Defaults to the same value as the `annotation` argument.
     """
+    from .._compat import model_dump
+
     if inner_type is None:
         inner_type = annotation
 
@@ -184,6 +188,8 @@ def _transform_recursive(
         (is_list_type(stripped_type) and is_list(data))
         # Iterable[T]
         or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
+        # Sequence[T]
+        or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
     ):
         # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
         # intended as an iterable, so we don't transform it.
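The new `Sequence[T]` branch mirrors the existing `Iterable[T]` handling. The `not isinstance(data, str)` guard matters because strings are sequences themselves; a stdlib-only sketch of what the guard keeps out (this does not use the package's own helpers):

    import collections.abc

    assert isinstance(("###", "\n\n"), collections.abc.Sequence)  # tuples now take this branch
    assert isinstance(["###"], collections.abc.Sequence)          # lists are sequences too
    assert isinstance("###", collections.abc.Sequence)            # so are bare strings, hence the guard
    assert not isinstance({"key": 1}, collections.abc.Sequence)   # dicts stay on the mapping path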
@@ -262,7 +268,7 @@ def _transform_typeddict(
     annotations = get_type_hints(expected_type, include_extras=True)
     for key, value in data.items():
         if not is_given(value):
-            # we don't need to include `NotGiven` values here as they'll
+            # we don't need to include omitted values here as they'll
             # be stripped out before the request is sent anyway
             continue
 
@@ -329,6 +335,8 @@ async def _async_transform_recursive(
 
     Defaults to the same value as the `annotation` argument.
    """
+    from .._compat import model_dump
+
     if inner_type is None:
         inner_type = annotation
 
@@ -346,6 +354,8 @@ async def _async_transform_recursive(
         (is_list_type(stripped_type) and is_list(data))
         # Iterable[T]
         or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
+        # Sequence[T]
+        or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
     ):
         # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
         # intended as an iterable, so we don't transform it.
@@ -424,7 +434,7 @@ async def _async_transform_typeddict(
     annotations = get_type_hints(expected_type, include_extras=True)
     for key, value in data.items():
         if not is_given(value):
-            # we don't need to include `NotGiven` values here as they'll
+            # we don't need to include omitted values here as they'll
             # be stripped out before the request is sent anyway
             continue
 
relaxai/_utils/_typing.py CHANGED
@@ -15,7 +15,7 @@ from typing_extensions import (
 
 from ._utils import lru_cache
 from .._types import InheritsGeneric
-from .._compat import is_union as _is_union
+from ._compat import is_union as _is_union
 
 
 def is_annotated_type(typ: type) -> bool:
@@ -26,6 +26,11 @@ def is_list_type(typ: type) -> bool:
     return (get_origin(typ) or typ) == list
 
 
+def is_sequence_type(typ: type) -> bool:
+    origin = get_origin(typ) or typ
+    return origin == typing_extensions.Sequence or origin == typing.Sequence or origin == _c_abc.Sequence
+
+
 def is_iterable_type(typ: type) -> bool:
     """If the given type is `typing.Iterable[T]`"""
     origin = get_origin(typ) or typ
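Based only on the comparison above, the new helper should accept the `Sequence` aliases from `typing`, `typing_extensions`, and `collections.abc`, while leaving `list` and `Iterable` to their existing checks. A quick sketch (importing the private module directly, purely for illustration):

    import typing
    import collections.abc
    from relaxai._utils._typing import is_sequence_type

    assert is_sequence_type(typing.Sequence[str])      # origin resolves to collections.abc.Sequence
    assert is_sequence_type(collections.abc.Sequence)  # the bare ABC matches as well
    assert not is_sequence_type(typing.List[str])      # plain lists stay with is_list_type
    assert not is_sequence_type(typing.Iterable[str])  # iterables stay with is_iterable_type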
relaxai/_utils/_utils.py CHANGED
@@ -21,8 +21,7 @@ from typing_extensions import TypeGuard
 
 import sniffio
 
-from .._types import NotGiven, FileTypes, NotGivenOr, HeadersLike
-from .._compat import parse_date as parse_date, parse_datetime as parse_datetime
+from .._types import Omit, NotGiven, FileTypes, HeadersLike
 
 _T = TypeVar("_T")
 _TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
@@ -64,7 +63,7 @@ def _extract_items(
     try:
         key = path[index]
     except IndexError:
-        if isinstance(obj, NotGiven):
+        if not is_given(obj):
             # no value was provided - we can safely ignore
             return []
 
@@ -127,8 +126,8 @@ def _extract_items(
     return []
 
 
-def is_given(obj: NotGivenOr[_T]) -> TypeGuard[_T]:
-    return not isinstance(obj, NotGiven)
+def is_given(obj: _T | NotGiven | Omit) -> TypeGuard[_T]:
+    return not isinstance(obj, NotGiven) and not isinstance(obj, Omit)
 
 
 # Type safe methods for narrowing types with TypeVars.
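With the widened signature, both sentinels are treated as "value not provided". A small sketch, assuming `omit` and `not_given` are the singleton instances exported from `relaxai._types` (the `chat.py` imports later in this diff suggest they are):

    from relaxai._types import omit, not_given
    from relaxai._utils._utils import is_given

    assert not is_given(omit)       # the new Omit sentinel counts as absent
    assert not is_given(not_given)  # NotGiven keeps its old behaviour
    assert is_given(None)           # an explicit None is still a real value
    assert is_given(0) and is_given("")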
relaxai/_version.py CHANGED
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "relaxai"
-__version__ = "0.2.0"  # x-release-please-version
+__version__ = "0.3.0"  # x-release-please-version
@@ -8,6 +8,14 @@ from .chat import (
     ChatResourceWithStreamingResponse,
     AsyncChatResourceWithStreamingResponse,
 )
+from .tools import (
+    ToolsResource,
+    AsyncToolsResource,
+    ToolsResourceWithRawResponse,
+    AsyncToolsResourceWithRawResponse,
+    ToolsResourceWithStreamingResponse,
+    AsyncToolsResourceWithStreamingResponse,
+)
 from .models import (
     ModelsResource,
     AsyncModelsResource,
@@ -24,6 +32,14 @@ from .embeddings import (
     EmbeddingsResourceWithStreamingResponse,
     AsyncEmbeddingsResourceWithStreamingResponse,
 )
+from .deep_research import (
+    DeepResearchResource,
+    AsyncDeepResearchResource,
+    DeepResearchResourceWithRawResponse,
+    AsyncDeepResearchResourceWithRawResponse,
+    DeepResearchResourceWithStreamingResponse,
+    AsyncDeepResearchResourceWithStreamingResponse,
+)
 
 __all__ = [
     "ChatResource",
@@ -44,4 +60,16 @@ __all__ = [
     "AsyncModelsResourceWithRawResponse",
     "ModelsResourceWithStreamingResponse",
     "AsyncModelsResourceWithStreamingResponse",
+    "ToolsResource",
+    "AsyncToolsResource",
+    "ToolsResourceWithRawResponse",
+    "AsyncToolsResourceWithRawResponse",
+    "ToolsResourceWithStreamingResponse",
+    "AsyncToolsResourceWithStreamingResponse",
+    "DeepResearchResource",
+    "AsyncDeepResearchResource",
+    "DeepResearchResourceWithRawResponse",
+    "AsyncDeepResearchResourceWithRawResponse",
+    "DeepResearchResourceWithStreamingResponse",
+    "AsyncDeepResearchResourceWithStreamingResponse",
 ]
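Assuming this hunk is `relaxai/resources/__init__.py` (the relative `from .chat import ...` pattern suggests it is), the two new resource families become importable directly from the resources package:

    from relaxai.resources import (
        ToolsResource,
        AsyncToolsResource,
        DeepResearchResource,
        AsyncDeepResearchResource,
    )

    # The *WithRawResponse / *WithStreamingResponse wrappers added to __all__
    # above are exported from the same module.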
relaxai/resources/chat.py CHANGED
@@ -2,12 +2,12 @@
 
 from __future__ import annotations
 
-from typing import Dict, List, Iterable
+from typing import Dict, Iterable
 
 import httpx
 
 from ..types import chat_create_completion_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
 from .._utils import maybe_transform, async_maybe_transform
 from .._compat import cached_property
 from .._resource import SyncAPIResource, AsyncAPIResource
@@ -51,40 +51,40 @@ class ChatResource(SyncAPIResource):
         *,
         messages: Iterable[ChatCompletionMessageParam],
         model: str,
-        query_stream: bool | NotGiven = NOT_GIVEN,
-        chat_template_kwargs: chat_create_completion_params.ChatTemplateKwargs | NotGiven = NOT_GIVEN,
-        frequency_penalty: float | NotGiven = NOT_GIVEN,
-        function_call: chat_create_completion_params.FunctionCall | NotGiven = NOT_GIVEN,
-        functions: Iterable[FunctionDefinitionParam] | NotGiven = NOT_GIVEN,
-        logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
-        logprobs: bool | NotGiven = NOT_GIVEN,
-        max_completion_tokens: int | NotGiven = NOT_GIVEN,
-        max_tokens: int | NotGiven = NOT_GIVEN,
-        metadata: Dict[str, str] | NotGiven = NOT_GIVEN,
-        n: int | NotGiven = NOT_GIVEN,
-        parallel_tool_calls: chat_create_completion_params.ParallelToolCalls | NotGiven = NOT_GIVEN,
-        prediction: chat_create_completion_params.Prediction | NotGiven = NOT_GIVEN,
-        presence_penalty: float | NotGiven = NOT_GIVEN,
-        reasoning_effort: str | NotGiven = NOT_GIVEN,
-        response_format: chat_create_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
-        seed: int | NotGiven = NOT_GIVEN,
-        stop: List[str] | NotGiven = NOT_GIVEN,
-        store: bool | NotGiven = NOT_GIVEN,
-        body_stream: bool | NotGiven = NOT_GIVEN,
-        stream_options: StreamOptionsParam | NotGiven = NOT_GIVEN,
-        temperature: float | NotGiven = NOT_GIVEN,
-        tool_choice: chat_create_completion_params.ToolChoice | NotGiven = NOT_GIVEN,
-        tools: Iterable[chat_create_completion_params.Tool] | NotGiven = NOT_GIVEN,
-        top_logprobs: int | NotGiven = NOT_GIVEN,
-        top_p: float | NotGiven = NOT_GIVEN,
-        user: str | NotGiven = NOT_GIVEN,
-        web_search_options: chat_create_completion_params.WebSearchOptions | NotGiven = NOT_GIVEN,
+        query_stream: bool | Omit = omit,
+        chat_template_kwargs: chat_create_completion_params.ChatTemplateKwargs | Omit = omit,
+        frequency_penalty: float | Omit = omit,
+        function_call: chat_create_completion_params.FunctionCall | Omit = omit,
+        functions: Iterable[FunctionDefinitionParam] | Omit = omit,
+        logit_bias: Dict[str, int] | Omit = omit,
+        logprobs: bool | Omit = omit,
+        max_completion_tokens: int | Omit = omit,
+        max_tokens: int | Omit = omit,
+        metadata: Dict[str, str] | Omit = omit,
+        n: int | Omit = omit,
+        parallel_tool_calls: chat_create_completion_params.ParallelToolCalls | Omit = omit,
+        prediction: chat_create_completion_params.Prediction | Omit = omit,
+        presence_penalty: float | Omit = omit,
+        reasoning_effort: str | Omit = omit,
+        response_format: chat_create_completion_params.ResponseFormat | Omit = omit,
+        seed: int | Omit = omit,
+        stop: SequenceNotStr[str] | Omit = omit,
+        store: bool | Omit = omit,
+        body_stream: bool | Omit = omit,
+        stream_options: StreamOptionsParam | Omit = omit,
+        temperature: float | Omit = omit,
+        tool_choice: chat_create_completion_params.ToolChoice | Omit = omit,
+        tools: Iterable[chat_create_completion_params.Tool] | Omit = omit,
+        top_logprobs: int | Omit = omit,
+        top_p: float | Omit = omit,
+        user: str | Omit = omit,
+        web_search_options: chat_create_completion_params.WebSearchOptions | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
         extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ChatCompletionResponse:
         """
         Creates a chat completion for the given model
@@ -175,40 +175,40 @@ class AsyncChatResource(AsyncAPIResource):
         *,
         messages: Iterable[ChatCompletionMessageParam],
         model: str,
-        query_stream: bool | NotGiven = NOT_GIVEN,
-        chat_template_kwargs: chat_create_completion_params.ChatTemplateKwargs | NotGiven = NOT_GIVEN,
-        frequency_penalty: float | NotGiven = NOT_GIVEN,
-        function_call: chat_create_completion_params.FunctionCall | NotGiven = NOT_GIVEN,
-        functions: Iterable[FunctionDefinitionParam] | NotGiven = NOT_GIVEN,
-        logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
-        logprobs: bool | NotGiven = NOT_GIVEN,
-        max_completion_tokens: int | NotGiven = NOT_GIVEN,
-        max_tokens: int | NotGiven = NOT_GIVEN,
-        metadata: Dict[str, str] | NotGiven = NOT_GIVEN,
-        n: int | NotGiven = NOT_GIVEN,
-        parallel_tool_calls: chat_create_completion_params.ParallelToolCalls | NotGiven = NOT_GIVEN,
-        prediction: chat_create_completion_params.Prediction | NotGiven = NOT_GIVEN,
-        presence_penalty: float | NotGiven = NOT_GIVEN,
-        reasoning_effort: str | NotGiven = NOT_GIVEN,
-        response_format: chat_create_completion_params.ResponseFormat | NotGiven = NOT_GIVEN,
-        seed: int | NotGiven = NOT_GIVEN,
-        stop: List[str] | NotGiven = NOT_GIVEN,
-        store: bool | NotGiven = NOT_GIVEN,
-        body_stream: bool | NotGiven = NOT_GIVEN,
-        stream_options: StreamOptionsParam | NotGiven = NOT_GIVEN,
-        temperature: float | NotGiven = NOT_GIVEN,
-        tool_choice: chat_create_completion_params.ToolChoice | NotGiven = NOT_GIVEN,
-        tools: Iterable[chat_create_completion_params.Tool] | NotGiven = NOT_GIVEN,
-        top_logprobs: int | NotGiven = NOT_GIVEN,
-        top_p: float | NotGiven = NOT_GIVEN,
-        user: str | NotGiven = NOT_GIVEN,
-        web_search_options: chat_create_completion_params.WebSearchOptions | NotGiven = NOT_GIVEN,
+        query_stream: bool | Omit = omit,
+        chat_template_kwargs: chat_create_completion_params.ChatTemplateKwargs | Omit = omit,
+        frequency_penalty: float | Omit = omit,
+        function_call: chat_create_completion_params.FunctionCall | Omit = omit,
+        functions: Iterable[FunctionDefinitionParam] | Omit = omit,
+        logit_bias: Dict[str, int] | Omit = omit,
+        logprobs: bool | Omit = omit,
+        max_completion_tokens: int | Omit = omit,
+        max_tokens: int | Omit = omit,
+        metadata: Dict[str, str] | Omit = omit,
+        n: int | Omit = omit,
+        parallel_tool_calls: chat_create_completion_params.ParallelToolCalls | Omit = omit,
+        prediction: chat_create_completion_params.Prediction | Omit = omit,
+        presence_penalty: float | Omit = omit,
+        reasoning_effort: str | Omit = omit,
+        response_format: chat_create_completion_params.ResponseFormat | Omit = omit,
+        seed: int | Omit = omit,
+        stop: SequenceNotStr[str] | Omit = omit,
+        store: bool | Omit = omit,
+        body_stream: bool | Omit = omit,
+        stream_options: StreamOptionsParam | Omit = omit,
+        temperature: float | Omit = omit,
+        tool_choice: chat_create_completion_params.ToolChoice | Omit = omit,
+        tools: Iterable[chat_create_completion_params.Tool] | Omit = omit,
+        top_logprobs: int | Omit = omit,
+        top_p: float | Omit = omit,
+        user: str | Omit = omit,
+        web_search_options: chat_create_completion_params.WebSearchOptions | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
         extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ChatCompletionResponse:
         """
         Creates a chat completion for the given model
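For callers, the visible changes are that `stop` is typed as `SequenceNotStr[str]` (any non-string sequence, so tuples work) and every optional parameter now defaults to the `omit` sentinel rather than `NOT_GIVEN`. A minimal sketch of what that typing allows, using only names imported in this file plus a hypothetical stub function:

    from __future__ import annotations

    from relaxai._types import Omit, SequenceNotStr, omit

    def stop_to_list(stop: SequenceNotStr[str] | Omit = omit) -> list[str]:
        # stand-in for how an omitted value is simply dropped from the request body
        return [] if isinstance(stop, Omit) else list(stop)

    assert stop_to_list() == []                          # omitted -> nothing sent
    assert stop_to_list(("###", "\n")) == ["###", "\n"]  # tuples are accepted now
    assert stop_to_list(["END"]) == ["END"]              # lists still work
    # stop_to_list("END")  # a bare str is rejected by static type checkers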
@@ -0,0 +1,165 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..types import deep_research_create_params
+from .._types import Body, Query, Headers, NoneType, NotGiven, not_given
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+    to_raw_response_wrapper,
+    to_streamed_response_wrapper,
+    async_to_raw_response_wrapper,
+    async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.deepresearch_request_param import DeepresearchRequestParam
+
+__all__ = ["DeepResearchResource", "AsyncDeepResearchResource"]
+
+
+class DeepResearchResource(SyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> DeepResearchResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/relax-ai/python-sdk#accessing-raw-response-data-eg-headers
+        """
+        return DeepResearchResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> DeepResearchResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/relax-ai/python-sdk#with_streaming_response
+        """
+        return DeepResearchResourceWithStreamingResponse(self)
+
+    def create(
+        self,
+        *,
+        body: DeepresearchRequestParam,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        Performs deep research on a given topic and returns a detailed report.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return self._post(
+            "/v1/deep-research",
+            body=maybe_transform({"body": body}, deep_research_create_params.DeepResearchCreateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+
+class AsyncDeepResearchResource(AsyncAPIResource):
+    @cached_property
+    def with_raw_response(self) -> AsyncDeepResearchResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/relax-ai/python-sdk#accessing-raw-response-data-eg-headers
+        """
+        return AsyncDeepResearchResourceWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncDeepResearchResourceWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/relax-ai/python-sdk#with_streaming_response
+        """
+        return AsyncDeepResearchResourceWithStreamingResponse(self)
+
+    async def create(
+        self,
+        *,
+        body: DeepresearchRequestParam,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> None:
+        """
+        Performs deep research on a given topic and returns a detailed report.
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._post(
+            "/v1/deep-research",
+            body=await async_maybe_transform({"body": body}, deep_research_create_params.DeepResearchCreateParams),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+
+class DeepResearchResourceWithRawResponse:
+    def __init__(self, deep_research: DeepResearchResource) -> None:
+        self._deep_research = deep_research
+
+        self.create = to_raw_response_wrapper(
+            deep_research.create,
+        )
+
+
+class AsyncDeepResearchResourceWithRawResponse:
+    def __init__(self, deep_research: AsyncDeepResearchResource) -> None:
+        self._deep_research = deep_research
+
+        self.create = async_to_raw_response_wrapper(
+            deep_research.create,
+        )
+
+
+class DeepResearchResourceWithStreamingResponse:
+    def __init__(self, deep_research: DeepResearchResource) -> None:
+        self._deep_research = deep_research
+
+        self.create = to_streamed_response_wrapper(
+            deep_research.create,
+        )
+
+
+class AsyncDeepResearchResourceWithStreamingResponse:
+    def __init__(self, deep_research: AsyncDeepResearchResource) -> None:
+        self._deep_research = deep_research
+
+        self.create = async_to_streamed_response_wrapper(
+            deep_research.create,
+        )
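A hypothetical usage sketch for the new resource (presumably `relaxai/resources/deep_research.py`). The client class name, its constructor arguments, the `deep_research` attribute, and the request fields are assumptions not shown in this diff; only the method shape and the `None` return come from the code above:

    from relaxai import RelaxAI  # hypothetical client class name

    client = RelaxAI(api_key="my-api-key")  # constructor arguments assumed

    # `create` posts to /v1/deep-research and returns None (cast_to=NoneType above)
    client.deep_research.create(
        body={"topic": "history of the transformer architecture"},  # illustrative fields only
    )

    # the raw-response wrapper defined above follows the usual pattern
    client.deep_research.with_raw_response.create(body={"topic": "follow-up"})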
@@ -5,7 +5,7 @@ from __future__ import annotations
 import httpx
 
 from ..types import embedding_create_embedding_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
 from .._utils import maybe_transform, async_maybe_transform
 from .._compat import cached_property
 from .._resource import SyncAPIResource, AsyncAPIResource
@@ -46,15 +46,15 @@ class EmbeddingsResource(SyncAPIResource):
         *,
         input: object,
         model: str,
-        dimensions: int | NotGiven = NOT_GIVEN,
-        encoding_format: str | NotGiven = NOT_GIVEN,
-        user: str | NotGiven = NOT_GIVEN,
+        dimensions: int | Omit = omit,
+        encoding_format: str | Omit = omit,
+        user: str | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
         extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> EmbeddingResponse:
         """
         Creates an embedding vector representing the input text.
@@ -112,15 +112,15 @@ class AsyncEmbeddingsResource(AsyncAPIResource):
         *,
         input: object,
         model: str,
-        dimensions: int | NotGiven = NOT_GIVEN,
-        encoding_format: str | NotGiven = NOT_GIVEN,
-        user: str | NotGiven = NOT_GIVEN,
+        dimensions: int | Omit = omit,
+        encoding_format: str | Omit = omit,
+        user: str | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
         extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> EmbeddingResponse:
         """
         Creates an embedding vector representing the input text.
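The embeddings methods follow the same pattern: the three optional parameters now default to `omit`. A hypothetical call sketch reusing the `client` object from the deep-research example (the `embeddings` attribute and `create_embedding` method name are inferred from `EmbeddingsResource` and `embedding_create_embedding_params`, not shown verbatim here):

    client.embeddings.create_embedding(
        model="some-embedding-model",               # illustrative model id
        input=["first passage", "second passage"],  # `input: object`, so a str or a list both type-check
        # dimensions / encoding_format / user default to `omit` and are left out of the request
    )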
@@ -4,7 +4,7 @@ from __future__ import annotations
 
 import httpx
 
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._types import Body, Query, Headers, NotGiven, not_given
 from .._compat import cached_property
 from .._resource import SyncAPIResource, AsyncAPIResource
 from .._response import (
@@ -48,7 +48,7 @@ class ModelsResource(SyncAPIResource):
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
         extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ModelList:
         """List all the available models"""
         return self._get(
@@ -68,7 +68,7 @@ class ModelsResource(SyncAPIResource):
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
         extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> Model:
         """
         Get the details of the given model
@@ -121,7 +121,7 @@ class AsyncModelsResource(AsyncAPIResource):
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
         extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> ModelList:
         """List all the available models"""
         return await self._get(
@@ -141,7 +141,7 @@ class AsyncModelsResource(AsyncAPIResource):
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
         extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
     ) -> Model:
         """
         Get the details of the given model