scale-gp-beta 0.1.0a29__py3-none-any.whl → 0.1.0a31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. scale_gp_beta/_base_client.py +7 -4
  2. scale_gp_beta/_client.py +17 -8
  3. scale_gp_beta/_compat.py +48 -48
  4. scale_gp_beta/_files.py +4 -4
  5. scale_gp_beta/_models.py +63 -42
  6. scale_gp_beta/_types.py +35 -1
  7. scale_gp_beta/_utils/__init__.py +9 -2
  8. scale_gp_beta/_utils/_compat.py +45 -0
  9. scale_gp_beta/_utils/_datetime_parse.py +136 -0
  10. scale_gp_beta/_utils/_transform.py +11 -1
  11. scale_gp_beta/_utils/_typing.py +6 -1
  12. scale_gp_beta/_utils/_utils.py +0 -1
  13. scale_gp_beta/_version.py +1 -1
  14. scale_gp_beta/lib/CONTRIBUTING.MD +53 -0
  15. scale_gp_beta/lib/tracing/trace_queue_manager.py +14 -0
  16. scale_gp_beta/resources/__init__.py +20 -6
  17. scale_gp_beta/resources/chat/completions.py +22 -18
  18. scale_gp_beta/resources/completions.py +18 -18
  19. scale_gp_beta/resources/datasets.py +8 -8
  20. scale_gp_beta/resources/evaluations.py +35 -13
  21. scale_gp_beta/resources/responses.py +314 -0
  22. scale_gp_beta/resources/spans.py +25 -33
  23. scale_gp_beta/types/__init__.py +17 -0
  24. scale_gp_beta/types/chat/chat_completion.py +61 -6
  25. scale_gp_beta/types/chat/chat_completion_chunk.py +17 -1
  26. scale_gp_beta/types/chat/completion_create_params.py +5 -3
  27. scale_gp_beta/types/chat/completion_models_params.py +2 -0
  28. scale_gp_beta/types/chat/model_definition.py +6 -0
  29. scale_gp_beta/types/completion.py +8 -0
  30. scale_gp_beta/types/completion_create_params.py +5 -3
  31. scale_gp_beta/types/container.py +2 -8
  32. scale_gp_beta/types/container_param.py +2 -2
  33. scale_gp_beta/types/dataset.py +3 -1
  34. scale_gp_beta/types/dataset_create_params.py +4 -2
  35. scale_gp_beta/types/dataset_item.py +3 -1
  36. scale_gp_beta/types/dataset_list_params.py +3 -2
  37. scale_gp_beta/types/dataset_update_params.py +3 -2
  38. scale_gp_beta/types/evaluation.py +7 -8
  39. scale_gp_beta/types/evaluation_create_params.py +17 -6
  40. scale_gp_beta/types/evaluation_item.py +3 -1
  41. scale_gp_beta/types/evaluation_list_params.py +3 -1
  42. scale_gp_beta/types/evaluation_task.py +31 -55
  43. scale_gp_beta/types/evaluation_task_param.py +32 -4
  44. scale_gp_beta/types/evaluation_update_params.py +3 -2
  45. scale_gp_beta/types/file.py +3 -1
  46. scale_gp_beta/types/inference_model.py +7 -0
  47. scale_gp_beta/types/model_create_params.py +6 -4
  48. scale_gp_beta/types/model_update_params.py +6 -4
  49. scale_gp_beta/types/question.py +11 -10
  50. scale_gp_beta/types/question_create_params.py +4 -2
  51. scale_gp_beta/types/response.py +2852 -0
  52. scale_gp_beta/types/response_create_params.py +819 -0
  53. scale_gp_beta/types/response_create_response.py +20891 -0
  54. scale_gp_beta/types/shared/__init__.py +3 -0
  55. scale_gp_beta/types/shared/identity.py +16 -0
  56. scale_gp_beta/types/span.py +4 -2
  57. scale_gp_beta/types/span_search_params.py +10 -12
  58. {scale_gp_beta-0.1.0a29.dist-info → scale_gp_beta-0.1.0a31.dist-info}/METADATA +2 -3
  59. {scale_gp_beta-0.1.0a29.dist-info → scale_gp_beta-0.1.0a31.dist-info}/RECORD +61 -52
  60. {scale_gp_beta-0.1.0a29.dist-info → scale_gp_beta-0.1.0a31.dist-info}/WHEEL +0 -0
  61. {scale_gp_beta-0.1.0a29.dist-info → scale_gp_beta-0.1.0a31.dist-info}/licenses/LICENSE +0 -0
@@ -10,7 +10,6 @@ from ._utils import (
10
10
  lru_cache as lru_cache,
11
11
  is_mapping as is_mapping,
12
12
  is_tuple_t as is_tuple_t,
13
- parse_date as parse_date,
14
13
  is_iterable as is_iterable,
15
14
  is_sequence as is_sequence,
16
15
  coerce_float as coerce_float,
@@ -23,7 +22,6 @@ from ._utils import (
23
22
  coerce_boolean as coerce_boolean,
24
23
  coerce_integer as coerce_integer,
25
24
  file_from_path as file_from_path,
26
- parse_datetime as parse_datetime,
27
25
  strip_not_given as strip_not_given,
28
26
  deepcopy_minimal as deepcopy_minimal,
29
27
  get_async_library as get_async_library,
@@ -32,12 +30,20 @@ from ._utils import (
32
30
  maybe_coerce_boolean as maybe_coerce_boolean,
33
31
  maybe_coerce_integer as maybe_coerce_integer,
34
32
  )
33
+ from ._compat import (
34
+ get_args as get_args,
35
+ is_union as is_union,
36
+ get_origin as get_origin,
37
+ is_typeddict as is_typeddict,
38
+ is_literal_type as is_literal_type,
39
+ )
35
40
  from ._typing import (
36
41
  is_list_type as is_list_type,
37
42
  is_union_type as is_union_type,
38
43
  extract_type_arg as extract_type_arg,
39
44
  is_iterable_type as is_iterable_type,
40
45
  is_required_type as is_required_type,
46
+ is_sequence_type as is_sequence_type,
41
47
  is_annotated_type as is_annotated_type,
42
48
  is_type_alias_type as is_type_alias_type,
43
49
  strip_annotated_type as strip_annotated_type,
@@ -55,3 +61,4 @@ from ._reflection import (
55
61
  function_has_argument as function_has_argument,
56
62
  assert_signatures_in_sync as assert_signatures_in_sync,
57
63
  )
64
+ from ._datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
@@ -0,0 +1,45 @@
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ import typing_extensions
5
+ from typing import Any, Type, Union, Literal, Optional
6
+ from datetime import date, datetime
7
+ from typing_extensions import get_args as _get_args, get_origin as _get_origin
8
+
9
+ from .._types import StrBytesIntFloat
10
+ from ._datetime_parse import parse_date as _parse_date, parse_datetime as _parse_datetime
11
+
12
+ _LITERAL_TYPES = {Literal, typing_extensions.Literal}
13
+
14
+
15
def get_args(tp: type[Any]) -> tuple[Any, ...]:
    """Return the type arguments of ``tp`` (e.g. ``(str, int)`` for ``dict[str, int]``).

    Thin wrapper over ``typing_extensions.get_args`` so callers import from one place.
    """
    args: tuple[Any, ...] = _get_args(tp)
    return args
17
+
18
+
19
def get_origin(tp: type[Any]) -> type[Any] | None:
    """Return the unsubscripted origin of ``tp`` (``list`` for ``list[int]``), or ``None``.

    Thin wrapper over ``typing_extensions.get_origin``.
    """
    origin: type[Any] | None = _get_origin(tp)
    return origin
21
+
22
+
23
def is_union(tp: Optional[Type[Any]]) -> bool:
    """Whether ``tp`` is the ``Union`` special form.

    On Python 3.10+ this also recognises ``types.UnionType``, the runtime type
    produced by PEP 604 ``X | Y`` expressions.
    """
    if tp is Union:  # type: ignore[comparison-overlap]
        return True
    if sys.version_info >= (3, 10):
        import types

        return tp is types.UnionType
    return False
30
+
31
+
32
+ def is_typeddict(tp: Type[Any]) -> bool:
33
+ return typing_extensions.is_typeddict(tp)
34
+
35
+
36
def is_literal_type(tp: Type[Any]) -> bool:
    """Whether ``tp`` is a ``Literal[...]`` form (typing or typing_extensions flavour)."""
    origin = get_origin(tp)
    return origin in _LITERAL_TYPES
38
+
39
+
40
def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
    """Parse ``value`` into a ``datetime.date``; delegates to ``_datetime_parse``."""
    parsed: date = _parse_date(value)
    return parsed
42
+
43
+
44
def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
    """Parse ``value`` into a ``datetime.datetime``; delegates to ``_datetime_parse``."""
    parsed: datetime = _parse_datetime(value)
    return parsed
@@ -0,0 +1,136 @@
1
+ """
2
+ This file contains code from https://github.com/pydantic/pydantic/blob/main/pydantic/v1/datetime_parse.py
3
+ without the Pydantic v1 specific errors.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ import re
9
+ from typing import Dict, Union, Optional
10
+ from datetime import date, datetime, timezone, timedelta
11
+
12
+ from .._types import StrBytesIntFloat
13
+
14
+ date_expr = r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
15
+ time_expr = (
16
+ r"(?P<hour>\d{1,2}):(?P<minute>\d{1,2})"
17
+ r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
18
+ r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$"
19
+ )
20
+
21
+ date_re = re.compile(f"{date_expr}$")
22
+ datetime_re = re.compile(f"{date_expr}[T ]{time_expr}")
23
+
24
+
25
+ EPOCH = datetime(1970, 1, 1)
26
+ # if greater than this, the number is in ms, if less than or equal it's in seconds
27
+ # (in seconds this is 11th October 2603, in ms it's 20th August 1970)
28
+ MS_WATERSHED = int(2e10)
29
+ # slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9
30
+ MAX_NUMBER = int(3e20)
31
+
32
+
33
+ def _get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]:
34
+ if isinstance(value, (int, float)):
35
+ return value
36
+ try:
37
+ return float(value)
38
+ except ValueError:
39
+ return None
40
+ except TypeError:
41
+ raise TypeError(f"invalid type; expected {native_expected_type}, string, bytes, int or float") from None
42
+
43
+
44
def _from_unix_seconds(seconds: Union[int, float]) -> datetime:
    """Convert a unix timestamp into an aware UTC datetime.

    Values beyond ±MAX_NUMBER clamp to datetime.max/min; magnitudes above
    MS_WATERSHED are treated as milliseconds (or finer) and repeatedly scaled
    down by 1000 until they fall in the seconds range.
    """
    if seconds > MAX_NUMBER:
        return datetime.max
    if seconds < -MAX_NUMBER:
        return datetime.min

    while abs(seconds) > MS_WATERSHED:
        seconds /= 1000
    stamp = EPOCH + timedelta(seconds=seconds)
    return stamp.replace(tzinfo=timezone.utc)
54
+
55
+
56
+ def _parse_timezone(value: Optional[str]) -> Union[None, int, timezone]:
57
+ if value == "Z":
58
+ return timezone.utc
59
+ elif value is not None:
60
+ offset_mins = int(value[-2:]) if len(value) > 3 else 0
61
+ offset = 60 * int(value[1:3]) + offset_mins
62
+ if value[0] == "-":
63
+ offset = -offset
64
+ return timezone(timedelta(minutes=offset))
65
+ else:
66
+ return None
67
+
68
+
69
def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
    """
    Parse a datetime/int/float/string and return a datetime.datetime.

    This function supports time zone offsets. When the input contains one,
    the output uses a timezone with a fixed offset from UTC.

    Raise ValueError if the input is well formatted but not a valid datetime.
    Raise ValueError if the input isn't well formatted.
    """
    if isinstance(value, datetime):
        return value

    numeric = _get_numeric(value, "datetime")
    if numeric is not None:
        # Numeric inputs are unix timestamps (seconds or milliseconds).
        return _from_unix_seconds(numeric)

    text = value.decode() if isinstance(value, bytes) else value
    assert not isinstance(text, (float, int))

    match = datetime_re.match(text)
    if match is None:
        raise ValueError("invalid datetime format")

    parts = match.groupdict()
    if parts["microsecond"]:
        # Pad a truncated fraction ("5" -> "500000") to whole microseconds.
        parts["microsecond"] = parts["microsecond"].ljust(6, "0")

    tzinfo = _parse_timezone(parts.pop("tzinfo"))
    kwargs: Dict[str, Union[None, int, timezone]] = {
        name: int(raw) for name, raw in parts.items() if raw is not None
    }
    kwargs["tzinfo"] = tzinfo

    return datetime(**kwargs)  # type: ignore
104
+
105
+
106
def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
    """
    Parse a date/int/float/string and return a datetime.date.

    Raise ValueError if the input is well formatted but not a valid date.
    Raise ValueError if the input isn't well formatted.
    """
    # datetime is a subclass of date, so check the narrower type first.
    if isinstance(value, datetime):
        return value.date()
    if isinstance(value, date):
        return value

    numeric = _get_numeric(value, "date")
    if numeric is not None:
        # Numeric inputs are unix timestamps; keep only the calendar portion.
        return _from_unix_seconds(numeric).date()

    text = value.decode() if isinstance(value, bytes) else value
    assert not isinstance(text, (float, int))

    match = date_re.match(text)
    if match is None:
        raise ValueError("invalid date format")

    parts = {name: int(raw) for name, raw in match.groupdict().items()}

    try:
        return date(**parts)
    except ValueError:
        # e.g. month 13 / day 32: well formatted but not a real date.
        raise ValueError("invalid date format") from None
@@ -16,18 +16,20 @@ from ._utils import (
16
16
  lru_cache,
17
17
  is_mapping,
18
18
  is_iterable,
19
+ is_sequence,
19
20
  )
20
21
  from .._files import is_base64_file_input
22
+ from ._compat import get_origin, is_typeddict
21
23
  from ._typing import (
22
24
  is_list_type,
23
25
  is_union_type,
24
26
  extract_type_arg,
25
27
  is_iterable_type,
26
28
  is_required_type,
29
+ is_sequence_type,
27
30
  is_annotated_type,
28
31
  strip_annotated_type,
29
32
  )
30
- from .._compat import get_origin, model_dump, is_typeddict
31
33
 
32
34
  _T = TypeVar("_T")
33
35
 
@@ -167,6 +169,8 @@ def _transform_recursive(
167
169
 
168
170
  Defaults to the same value as the `annotation` argument.
169
171
  """
172
+ from .._compat import model_dump
173
+
170
174
  if inner_type is None:
171
175
  inner_type = annotation
172
176
 
@@ -184,6 +188,8 @@ def _transform_recursive(
184
188
  (is_list_type(stripped_type) and is_list(data))
185
189
  # Iterable[T]
186
190
  or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
191
+ # Sequence[T]
192
+ or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
187
193
  ):
188
194
  # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
189
195
  # intended as an iterable, so we don't transform it.
@@ -329,6 +335,8 @@ async def _async_transform_recursive(
329
335
 
330
336
  Defaults to the same value as the `annotation` argument.
331
337
  """
338
+ from .._compat import model_dump
339
+
332
340
  if inner_type is None:
333
341
  inner_type = annotation
334
342
 
@@ -346,6 +354,8 @@ async def _async_transform_recursive(
346
354
  (is_list_type(stripped_type) and is_list(data))
347
355
  # Iterable[T]
348
356
  or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str))
357
+ # Sequence[T]
358
+ or (is_sequence_type(stripped_type) and is_sequence(data) and not isinstance(data, str))
349
359
  ):
350
360
  # dicts are technically iterable, but it is an iterable on the keys of the dict and is not usually
351
361
  # intended as an iterable, so we don't transform it.
@@ -15,7 +15,7 @@ from typing_extensions import (
15
15
 
16
16
  from ._utils import lru_cache
17
17
  from .._types import InheritsGeneric
18
- from .._compat import is_union as _is_union
18
+ from ._compat import is_union as _is_union
19
19
 
20
20
 
21
21
  def is_annotated_type(typ: type) -> bool:
@@ -26,6 +26,11 @@ def is_list_type(typ: type) -> bool:
26
26
  return (get_origin(typ) or typ) == list
27
27
 
28
28
 
29
+ def is_sequence_type(typ: type) -> bool:
30
+ origin = get_origin(typ) or typ
31
+ return origin == typing_extensions.Sequence or origin == typing.Sequence or origin == _c_abc.Sequence
32
+
33
+
29
34
  def is_iterable_type(typ: type) -> bool:
30
35
  """If the given type is `typing.Iterable[T]`"""
31
36
  origin = get_origin(typ) or typ
@@ -22,7 +22,6 @@ from typing_extensions import TypeGuard
22
22
  import sniffio
23
23
 
24
24
  from .._types import NotGiven, FileTypes, NotGivenOr, HeadersLike
25
- from .._compat import parse_date as parse_date, parse_datetime as parse_datetime
26
25
 
27
26
  _T = TypeVar("_T")
28
27
  _TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
scale_gp_beta/_version.py CHANGED
@@ -1,4 +1,4 @@
1
1
  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
2
 
3
3
  __title__ = "scale_gp_beta"
4
- __version__ = "0.1.0-alpha.29" # x-release-please-version
4
+ __version__ = "0.1.0-alpha.31" # x-release-please-version
@@ -0,0 +1,53 @@
1
+ # Custom Code Patch
2
+ You can in theory add custom code patches to anywhere in the repo but at the risk of encountering many merge conflicts with Stainless into the future.
3
+ Stainless will never use the `/lib` and `/examples` directories. When possible try to only modify these directories.
4
+ If you have to add custom code elsewhere, please keep the footprint small and create a library for most of the logic.
5
+
6
+
7
+ For information on custom code patching with Stainless see [here](https://www.stainless.com/docs/guides/add-custom-code).
8
+
9
+ # Process for Adding Features
10
+ Checkout the `next` branch and pull, then create a branch from `next`.
11
+
12
+ > **_NOTE:_** Stainless uses next to "queue up" updates to the SDK.
13
+ >
14
+ > Stainless will update next with their own logic updates to the SDK along with any changes in the OpenAPI spec and changes to the Stainless
15
+ config on their SaaS platform.
16
+
17
+ Make any code changes you need. Ensuring all the tests for the library are passing.
18
+
19
+ There is strict linting in this repo. Use the following commands in order.
20
+
21
+ ```bash
22
+ rye lint --fix
23
+ ```
24
+
25
+ ```bash
26
+ rye run lint | grep /specific_file.py
27
+ ```
28
+
29
+ `rye run lint` will not work if there are errors with `rye lint --fix`.
30
+ I am unsure why but I get many errors which are ignorable in the rest of the repo when running `rye run lint` so I usually use it
31
+ with grep to target the file I am developing.
32
+
33
+ > **_NOTE:_** The strict linting requires all types to be strictly typed. This can be a pain but is worth considering before developing any new solution.
34
+ > Try and avoid using ignore commands when possible, but sometimes it is unavoidable (see OpenAI tracing Plugin).
35
+
36
+ When committing, use the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) spec. Usually this is something like:
37
+
38
+ ```bash
39
+ git commit -m "feat: Added OpenAI tracing processor"
40
+ ```
41
+ This allows Stainless to update the release doc with more useful info.
42
+
43
+ **When creating a PR, you must manually change the destination from `main` to `next`.**
44
+
45
+ Once merged, Stainless should pick up the changes and update or create a new PR for `main`.
46
+ You will need to accept changes on this PR; Stainless should then auto-merge. Note that on occasion I have had to merge manually.
47
+ There was no consequence to this.
48
+
49
+ These PRs Stainless makes have 4 types of changes, all are merged to main via these automatic PR requests:
50
+ 1. Custom Code Changes
51
+ 2. Stainless SDK logic changes
52
+ 3. Changes to the Stainless Config in the SaaS platform
53
+ 4. Changes to the OpenAPI schema
@@ -11,6 +11,8 @@ from .util import configure, is_disabled
11
11
  from .trace_exporter import TraceExporter
12
12
 
13
13
  if TYPE_CHECKING:
14
+ import httpx
15
+
14
16
  from .span import Span
15
17
  from .trace import Trace
16
18
 
@@ -44,6 +46,7 @@ class TraceQueueManager:
44
46
  worker_enabled: Optional[bool] = None,
45
47
  ):
46
48
  self._client = client
49
+ self.register_client(client) if client else None
47
50
  self._attempted_local_client_creation = False
48
51
  self._trigger_queue_size = trigger_queue_size
49
52
  self._trigger_cadence = trigger_cadence
@@ -68,6 +71,17 @@ class TraceQueueManager:
68
71
  log.info("Registering client")
69
72
  self._client = client
70
73
 
74
+ original_prepare_request = self._client._prepare_request
75
+
76
+ def custom_prepare_request(request: "httpx.Request") -> None:
77
+ original_prepare_request(request)
78
+
79
+ # TODO: Hook logic here, we should check to see if we are in the scope of a span, if so we should inject
80
+ # appropriate headers into the request
81
+ # current_span = Scope.get_current_span()
82
+
83
+ self._client._prepare_request = custom_prepare_request # type: ignore
84
+
71
85
  def shutdown(self, timeout: Optional[float] = None) -> None:
72
86
  if not self._worker_enabled:
73
87
  log.debug("No worker to shutdown")
@@ -56,6 +56,14 @@ from .questions import (
56
56
  QuestionsResourceWithStreamingResponse,
57
57
  AsyncQuestionsResourceWithStreamingResponse,
58
58
  )
59
+ from .responses import (
60
+ ResponsesResource,
61
+ AsyncResponsesResource,
62
+ ResponsesResourceWithRawResponse,
63
+ AsyncResponsesResourceWithRawResponse,
64
+ ResponsesResourceWithStreamingResponse,
65
+ AsyncResponsesResourceWithStreamingResponse,
66
+ )
59
67
  from .completions import (
60
68
  CompletionsResource,
61
69
  AsyncCompletionsResource,
@@ -90,6 +98,12 @@ from .evaluation_items import (
90
98
  )
91
99
 
92
100
  __all__ = [
101
+ "ResponsesResource",
102
+ "AsyncResponsesResource",
103
+ "ResponsesResourceWithRawResponse",
104
+ "AsyncResponsesResourceWithRawResponse",
105
+ "ResponsesResourceWithStreamingResponse",
106
+ "AsyncResponsesResourceWithStreamingResponse",
93
107
  "CompletionsResource",
94
108
  "AsyncCompletionsResource",
95
109
  "CompletionsResourceWithRawResponse",
@@ -132,18 +146,18 @@ __all__ = [
132
146
  "AsyncDatasetsResourceWithRawResponse",
133
147
  "DatasetsResourceWithStreamingResponse",
134
148
  "AsyncDatasetsResourceWithStreamingResponse",
135
- "EvaluationsResource",
136
- "AsyncEvaluationsResource",
137
- "EvaluationsResourceWithRawResponse",
138
- "AsyncEvaluationsResourceWithRawResponse",
139
- "EvaluationsResourceWithStreamingResponse",
140
- "AsyncEvaluationsResourceWithStreamingResponse",
141
149
  "DatasetItemsResource",
142
150
  "AsyncDatasetItemsResource",
143
151
  "DatasetItemsResourceWithRawResponse",
144
152
  "AsyncDatasetItemsResourceWithRawResponse",
145
153
  "DatasetItemsResourceWithStreamingResponse",
146
154
  "AsyncDatasetItemsResourceWithStreamingResponse",
155
+ "EvaluationsResource",
156
+ "AsyncEvaluationsResource",
157
+ "EvaluationsResourceWithRawResponse",
158
+ "AsyncEvaluationsResourceWithRawResponse",
159
+ "EvaluationsResourceWithStreamingResponse",
160
+ "AsyncEvaluationsResourceWithStreamingResponse",
147
161
  "EvaluationItemsResource",
148
162
  "AsyncEvaluationItemsResource",
149
163
  "EvaluationItemsResourceWithRawResponse",
@@ -2,12 +2,12 @@
2
2
 
3
3
  from __future__ import annotations
4
4
 
5
- from typing import Any, Dict, List, Union, Iterable, cast
5
+ from typing import Any, Dict, Union, Iterable, cast
6
6
  from typing_extensions import Literal, overload
7
7
 
8
8
  import httpx
9
9
 
10
- from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
10
+ from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr
11
11
  from ..._utils import required_args, maybe_transform, async_maybe_transform
12
12
  from ..._compat import cached_property
13
13
  from ..._resource import SyncAPIResource, AsyncAPIResource
@@ -62,7 +62,7 @@ class CompletionsResource(SyncAPIResource):
62
62
  max_completion_tokens: int | NotGiven = NOT_GIVEN,
63
63
  max_tokens: int | NotGiven = NOT_GIVEN,
64
64
  metadata: Dict[str, str] | NotGiven = NOT_GIVEN,
65
- modalities: List[str] | NotGiven = NOT_GIVEN,
65
+ modalities: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
66
66
  n: int | NotGiven = NOT_GIVEN,
67
67
  parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
68
68
  prediction: Dict[str, object] | NotGiven = NOT_GIVEN,
@@ -70,7 +70,7 @@ class CompletionsResource(SyncAPIResource):
70
70
  reasoning_effort: str | NotGiven = NOT_GIVEN,
71
71
  response_format: Dict[str, object] | NotGiven = NOT_GIVEN,
72
72
  seed: int | NotGiven = NOT_GIVEN,
73
- stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
73
+ stop: Union[str, SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
74
74
  store: bool | NotGiven = NOT_GIVEN,
75
75
  stream: Literal[False] | NotGiven = NOT_GIVEN,
76
76
  stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
@@ -191,7 +191,7 @@ class CompletionsResource(SyncAPIResource):
191
191
  max_completion_tokens: int | NotGiven = NOT_GIVEN,
192
192
  max_tokens: int | NotGiven = NOT_GIVEN,
193
193
  metadata: Dict[str, str] | NotGiven = NOT_GIVEN,
194
- modalities: List[str] | NotGiven = NOT_GIVEN,
194
+ modalities: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
195
195
  n: int | NotGiven = NOT_GIVEN,
196
196
  parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
197
197
  prediction: Dict[str, object] | NotGiven = NOT_GIVEN,
@@ -199,7 +199,7 @@ class CompletionsResource(SyncAPIResource):
199
199
  reasoning_effort: str | NotGiven = NOT_GIVEN,
200
200
  response_format: Dict[str, object] | NotGiven = NOT_GIVEN,
201
201
  seed: int | NotGiven = NOT_GIVEN,
202
- stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
202
+ stop: Union[str, SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
203
203
  store: bool | NotGiven = NOT_GIVEN,
204
204
  stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
205
205
  temperature: float | NotGiven = NOT_GIVEN,
@@ -319,7 +319,7 @@ class CompletionsResource(SyncAPIResource):
319
319
  max_completion_tokens: int | NotGiven = NOT_GIVEN,
320
320
  max_tokens: int | NotGiven = NOT_GIVEN,
321
321
  metadata: Dict[str, str] | NotGiven = NOT_GIVEN,
322
- modalities: List[str] | NotGiven = NOT_GIVEN,
322
+ modalities: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
323
323
  n: int | NotGiven = NOT_GIVEN,
324
324
  parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
325
325
  prediction: Dict[str, object] | NotGiven = NOT_GIVEN,
@@ -327,7 +327,7 @@ class CompletionsResource(SyncAPIResource):
327
327
  reasoning_effort: str | NotGiven = NOT_GIVEN,
328
328
  response_format: Dict[str, object] | NotGiven = NOT_GIVEN,
329
329
  seed: int | NotGiven = NOT_GIVEN,
330
- stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
330
+ stop: Union[str, SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
331
331
  store: bool | NotGiven = NOT_GIVEN,
332
332
  stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
333
333
  temperature: float | NotGiven = NOT_GIVEN,
@@ -446,7 +446,7 @@ class CompletionsResource(SyncAPIResource):
446
446
  max_completion_tokens: int | NotGiven = NOT_GIVEN,
447
447
  max_tokens: int | NotGiven = NOT_GIVEN,
448
448
  metadata: Dict[str, str] | NotGiven = NOT_GIVEN,
449
- modalities: List[str] | NotGiven = NOT_GIVEN,
449
+ modalities: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
450
450
  n: int | NotGiven = NOT_GIVEN,
451
451
  parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
452
452
  prediction: Dict[str, object] | NotGiven = NOT_GIVEN,
@@ -454,7 +454,7 @@ class CompletionsResource(SyncAPIResource):
454
454
  reasoning_effort: str | NotGiven = NOT_GIVEN,
455
455
  response_format: Dict[str, object] | NotGiven = NOT_GIVEN,
456
456
  seed: int | NotGiven = NOT_GIVEN,
457
- stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
457
+ stop: Union[str, SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
458
458
  store: bool | NotGiven = NOT_GIVEN,
459
459
  stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
460
460
  stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
@@ -522,6 +522,7 @@ class CompletionsResource(SyncAPIResource):
522
522
  def models(
523
523
  self,
524
524
  *,
525
+ check_availability: bool | NotGiven = NOT_GIVEN,
525
526
  ending_before: str | NotGiven = NOT_GIVEN,
526
527
  limit: int | NotGiven = NOT_GIVEN,
527
528
  model_vendor: Literal[
@@ -569,6 +570,7 @@ class CompletionsResource(SyncAPIResource):
569
570
  timeout=timeout,
570
571
  query=maybe_transform(
571
572
  {
573
+ "check_availability": check_availability,
572
574
  "ending_before": ending_before,
573
575
  "limit": limit,
574
576
  "model_vendor": model_vendor,
@@ -617,7 +619,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
617
619
  max_completion_tokens: int | NotGiven = NOT_GIVEN,
618
620
  max_tokens: int | NotGiven = NOT_GIVEN,
619
621
  metadata: Dict[str, str] | NotGiven = NOT_GIVEN,
620
- modalities: List[str] | NotGiven = NOT_GIVEN,
622
+ modalities: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
621
623
  n: int | NotGiven = NOT_GIVEN,
622
624
  parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
623
625
  prediction: Dict[str, object] | NotGiven = NOT_GIVEN,
@@ -625,7 +627,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
625
627
  reasoning_effort: str | NotGiven = NOT_GIVEN,
626
628
  response_format: Dict[str, object] | NotGiven = NOT_GIVEN,
627
629
  seed: int | NotGiven = NOT_GIVEN,
628
- stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
630
+ stop: Union[str, SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
629
631
  store: bool | NotGiven = NOT_GIVEN,
630
632
  stream: Literal[False] | NotGiven = NOT_GIVEN,
631
633
  stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
@@ -746,7 +748,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
746
748
  max_completion_tokens: int | NotGiven = NOT_GIVEN,
747
749
  max_tokens: int | NotGiven = NOT_GIVEN,
748
750
  metadata: Dict[str, str] | NotGiven = NOT_GIVEN,
749
- modalities: List[str] | NotGiven = NOT_GIVEN,
751
+ modalities: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
750
752
  n: int | NotGiven = NOT_GIVEN,
751
753
  parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
752
754
  prediction: Dict[str, object] | NotGiven = NOT_GIVEN,
@@ -754,7 +756,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
754
756
  reasoning_effort: str | NotGiven = NOT_GIVEN,
755
757
  response_format: Dict[str, object] | NotGiven = NOT_GIVEN,
756
758
  seed: int | NotGiven = NOT_GIVEN,
757
- stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
759
+ stop: Union[str, SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
758
760
  store: bool | NotGiven = NOT_GIVEN,
759
761
  stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
760
762
  temperature: float | NotGiven = NOT_GIVEN,
@@ -874,7 +876,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
874
876
  max_completion_tokens: int | NotGiven = NOT_GIVEN,
875
877
  max_tokens: int | NotGiven = NOT_GIVEN,
876
878
  metadata: Dict[str, str] | NotGiven = NOT_GIVEN,
877
- modalities: List[str] | NotGiven = NOT_GIVEN,
879
+ modalities: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
878
880
  n: int | NotGiven = NOT_GIVEN,
879
881
  parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
880
882
  prediction: Dict[str, object] | NotGiven = NOT_GIVEN,
@@ -882,7 +884,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
882
884
  reasoning_effort: str | NotGiven = NOT_GIVEN,
883
885
  response_format: Dict[str, object] | NotGiven = NOT_GIVEN,
884
886
  seed: int | NotGiven = NOT_GIVEN,
885
- stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
887
+ stop: Union[str, SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
886
888
  store: bool | NotGiven = NOT_GIVEN,
887
889
  stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
888
890
  temperature: float | NotGiven = NOT_GIVEN,
@@ -1001,7 +1003,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
1001
1003
  max_completion_tokens: int | NotGiven = NOT_GIVEN,
1002
1004
  max_tokens: int | NotGiven = NOT_GIVEN,
1003
1005
  metadata: Dict[str, str] | NotGiven = NOT_GIVEN,
1004
- modalities: List[str] | NotGiven = NOT_GIVEN,
1006
+ modalities: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
1005
1007
  n: int | NotGiven = NOT_GIVEN,
1006
1008
  parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
1007
1009
  prediction: Dict[str, object] | NotGiven = NOT_GIVEN,
@@ -1009,7 +1011,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
1009
1011
  reasoning_effort: str | NotGiven = NOT_GIVEN,
1010
1012
  response_format: Dict[str, object] | NotGiven = NOT_GIVEN,
1011
1013
  seed: int | NotGiven = NOT_GIVEN,
1012
- stop: Union[str, List[str]] | NotGiven = NOT_GIVEN,
1014
+ stop: Union[str, SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
1013
1015
  store: bool | NotGiven = NOT_GIVEN,
1014
1016
  stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
1015
1017
  stream_options: Dict[str, object] | NotGiven = NOT_GIVEN,
@@ -1077,6 +1079,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
1077
1079
  async def models(
1078
1080
  self,
1079
1081
  *,
1082
+ check_availability: bool | NotGiven = NOT_GIVEN,
1080
1083
  ending_before: str | NotGiven = NOT_GIVEN,
1081
1084
  limit: int | NotGiven = NOT_GIVEN,
1082
1085
  model_vendor: Literal[
@@ -1124,6 +1127,7 @@ class AsyncCompletionsResource(AsyncAPIResource):
1124
1127
  timeout=timeout,
1125
1128
  query=await async_maybe_transform(
1126
1129
  {
1130
+ "check_availability": check_availability,
1127
1131
  "ending_before": ending_before,
1128
1132
  "limit": limit,
1129
1133
  "model_vendor": model_vendor,