scale-gp-beta 0.1.0a3__py3-none-any.whl → 0.1.0a5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. scale_gp_beta/_base_client.py +1 -96
  2. scale_gp_beta/_client.py +17 -17
  3. scale_gp_beta/_models.py +5 -2
  4. scale_gp_beta/_version.py +1 -1
  5. scale_gp_beta/resources/__init__.py +27 -27
  6. scale_gp_beta/resources/chat/completions.py +86 -92
  7. scale_gp_beta/resources/datasets.py +569 -0
  8. scale_gp_beta/resources/{question_sets.py → evaluations.py} +259 -273
  9. scale_gp_beta/resources/models.py +2 -0
  10. scale_gp_beta/types/__init__.py +11 -11
  11. scale_gp_beta/types/dataset.py +27 -0
  12. scale_gp_beta/types/dataset_create_params.py +17 -0
  13. scale_gp_beta/types/{question_set_delete_response.py → dataset_delete_response.py} +3 -3
  14. scale_gp_beta/types/{question_list_params.py → dataset_list_params.py} +4 -2
  15. scale_gp_beta/types/dataset_retrieve_params.py +11 -0
  16. scale_gp_beta/types/dataset_update_params.py +13 -0
  17. scale_gp_beta/types/evaluation.py +30 -0
  18. scale_gp_beta/types/evaluation_archive_response.py +16 -0
  19. scale_gp_beta/types/evaluation_create_params.py +734 -0
  20. scale_gp_beta/types/{question_set_list_params.py → evaluation_list_params.py} +4 -6
  21. scale_gp_beta/types/evaluation_retrieve_params.py +11 -0
  22. scale_gp_beta/types/inference_model.py +16 -2
  23. scale_gp_beta/types/model_list_params.py +1 -0
  24. {scale_gp_beta-0.1.0a3.dist-info → scale_gp_beta-0.1.0a5.dist-info}/METADATA +24 -4
  25. {scale_gp_beta-0.1.0a3.dist-info → scale_gp_beta-0.1.0a5.dist-info}/RECORD +27 -27
  26. {scale_gp_beta-0.1.0a3.dist-info → scale_gp_beta-0.1.0a5.dist-info}/WHEEL +1 -1
  27. scale_gp_beta/resources/questions.py +0 -396
  28. scale_gp_beta/types/question.py +0 -68
  29. scale_gp_beta/types/question_create_params.py +0 -59
  30. scale_gp_beta/types/question_list.py +0 -27
  31. scale_gp_beta/types/question_set.py +0 -106
  32. scale_gp_beta/types/question_set_create_params.py +0 -115
  33. scale_gp_beta/types/question_set_list.py +0 -27
  34. scale_gp_beta/types/question_set_retrieve_params.py +0 -12
  35. scale_gp_beta/types/question_set_update_params.py +0 -23
  36. {scale_gp_beta-0.1.0a3.dist-info → scale_gp_beta-0.1.0a5.dist-info}/licenses/LICENSE +0 -0
scale_gp_beta/_base_client.py CHANGED
@@ -9,7 +9,6 @@ import asyncio
 import inspect
 import logging
 import platform
-import warnings
 import email.utils
 from types import TracebackType
 from random import random
@@ -36,7 +35,7 @@ import anyio
 import httpx
 import distro
 import pydantic
-from httpx import URL, Limits
+from httpx import URL
 from pydantic import PrivateAttr

 from . import _exceptions
@@ -51,13 +50,10 @@ from ._types import (
     Timeout,
     NotGiven,
     ResponseT,
-    Transport,
     AnyMapping,
     PostParser,
-    ProxiesTypes,
     RequestFiles,
     HttpxSendArgs,
-    AsyncTransport,
     RequestOptions,
     HttpxRequestFiles,
     ModelBuilderProtocol,
@@ -337,9 +333,6 @@ class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]):
     _base_url: URL
     max_retries: int
     timeout: Union[float, Timeout, None]
-    _limits: httpx.Limits
-    _proxies: ProxiesTypes | None
-    _transport: Transport | AsyncTransport | None
     _strict_response_validation: bool
     _idempotency_header: str | None
     _default_stream_cls: type[_DefaultStreamT] | None = None
@@ -352,9 +345,6 @@ class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]):
         _strict_response_validation: bool,
         max_retries: int = DEFAULT_MAX_RETRIES,
         timeout: float | Timeout | None = DEFAULT_TIMEOUT,
-        limits: httpx.Limits,
-        transport: Transport | AsyncTransport | None,
-        proxies: ProxiesTypes | None,
         custom_headers: Mapping[str, str] | None = None,
         custom_query: Mapping[str, object] | None = None,
     ) -> None:
@@ -362,9 +352,6 @@ class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]):
         self._base_url = self._enforce_trailing_slash(URL(base_url))
         self.max_retries = max_retries
         self.timeout = timeout
-        self._limits = limits
-        self._proxies = proxies
-        self._transport = transport
         self._custom_headers = custom_headers or {}
         self._custom_query = custom_query or {}
         self._strict_response_validation = _strict_response_validation
@@ -800,46 +787,11 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
         base_url: str | URL,
         max_retries: int = DEFAULT_MAX_RETRIES,
         timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
-        transport: Transport | None = None,
-        proxies: ProxiesTypes | None = None,
-        limits: Limits | None = None,
         http_client: httpx.Client | None = None,
         custom_headers: Mapping[str, str] | None = None,
         custom_query: Mapping[str, object] | None = None,
         _strict_response_validation: bool,
     ) -> None:
-        kwargs: dict[str, Any] = {}
-        if limits is not None:
-            warnings.warn(
-                "The `connection_pool_limits` argument is deprecated. The `http_client` argument should be passed instead",
-                category=DeprecationWarning,
-                stacklevel=3,
-            )
-            if http_client is not None:
-                raise ValueError("The `http_client` argument is mutually exclusive with `connection_pool_limits`")
-        else:
-            limits = DEFAULT_CONNECTION_LIMITS
-
-        if transport is not None:
-            kwargs["transport"] = transport
-            warnings.warn(
-                "The `transport` argument is deprecated. The `http_client` argument should be passed instead",
-                category=DeprecationWarning,
-                stacklevel=3,
-            )
-            if http_client is not None:
-                raise ValueError("The `http_client` argument is mutually exclusive with `transport`")
-
-        if proxies is not None:
-            kwargs["proxies"] = proxies
-            warnings.warn(
-                "The `proxies` argument is deprecated. The `http_client` argument should be passed instead",
-                category=DeprecationWarning,
-                stacklevel=3,
-            )
-            if http_client is not None:
-                raise ValueError("The `http_client` argument is mutually exclusive with `proxies`")
-
         if not is_given(timeout):
             # if the user passed in a custom http client with a non-default
             # timeout set then we use that timeout.
@@ -860,12 +812,9 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):

         super().__init__(
             version=version,
-            limits=limits,
             # cast to a valid type because mypy doesn't understand our type narrowing
             timeout=cast(Timeout, timeout),
-            proxies=proxies,
             base_url=base_url,
-            transport=transport,
             max_retries=max_retries,
             custom_query=custom_query,
             custom_headers=custom_headers,
@@ -875,9 +824,6 @@ class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
             base_url=base_url,
             # cast to a valid type because mypy doesn't understand our type narrowing
             timeout=cast(Timeout, timeout),
-            limits=limits,
-            follow_redirects=True,
-            **kwargs,  # type: ignore
         )

     def is_closed(self) -> bool:
@@ -1372,45 +1318,10 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
         _strict_response_validation: bool,
         max_retries: int = DEFAULT_MAX_RETRIES,
         timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
-        transport: AsyncTransport | None = None,
-        proxies: ProxiesTypes | None = None,
-        limits: Limits | None = None,
         http_client: httpx.AsyncClient | None = None,
         custom_headers: Mapping[str, str] | None = None,
         custom_query: Mapping[str, object] | None = None,
     ) -> None:
-        kwargs: dict[str, Any] = {}
-        if limits is not None:
-            warnings.warn(
-                "The `connection_pool_limits` argument is deprecated. The `http_client` argument should be passed instead",
-                category=DeprecationWarning,
-                stacklevel=3,
-            )
-            if http_client is not None:
-                raise ValueError("The `http_client` argument is mutually exclusive with `connection_pool_limits`")
-        else:
-            limits = DEFAULT_CONNECTION_LIMITS
-
-        if transport is not None:
-            kwargs["transport"] = transport
-            warnings.warn(
-                "The `transport` argument is deprecated. The `http_client` argument should be passed instead",
-                category=DeprecationWarning,
-                stacklevel=3,
-            )
-            if http_client is not None:
-                raise ValueError("The `http_client` argument is mutually exclusive with `transport`")
-
-        if proxies is not None:
-            kwargs["proxies"] = proxies
-            warnings.warn(
-                "The `proxies` argument is deprecated. The `http_client` argument should be passed instead",
-                category=DeprecationWarning,
-                stacklevel=3,
-            )
-            if http_client is not None:
-                raise ValueError("The `http_client` argument is mutually exclusive with `proxies`")
-
         if not is_given(timeout):
             # if the user passed in a custom http client with a non-default
             # timeout set then we use that timeout.
@@ -1432,11 +1343,8 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
         super().__init__(
             version=version,
             base_url=base_url,
-            limits=limits,
             # cast to a valid type because mypy doesn't understand our type narrowing
             timeout=cast(Timeout, timeout),
-            proxies=proxies,
-            transport=transport,
             max_retries=max_retries,
             custom_query=custom_query,
             custom_headers=custom_headers,
@@ -1446,9 +1354,6 @@ class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
             base_url=base_url,
             # cast to a valid type because mypy doesn't understand our type narrowing
             timeout=cast(Timeout, timeout),
-            limits=limits,
-            follow_redirects=True,
-            **kwargs,  # type: ignore
         )

     def is_closed(self) -> bool:
scale_gp_beta/_client.py CHANGED
@@ -24,7 +24,7 @@ from ._utils import (
     get_async_library,
 )
 from ._version import __version__
-from .resources import models, inference, questions, completions, question_sets
+from .resources import models, datasets, inference, completions, evaluations
 from ._streaming import Stream as Stream, AsyncStream as AsyncStream
 from ._exceptions import APIStatusError, SGPClientError
 from ._base_client import (
@@ -57,10 +57,10 @@ class SGPClient(SyncAPIClient):
     completions: completions.CompletionsResource
     chat: chat.ChatResource
     inference: inference.InferenceResource
-    questions: questions.QuestionsResource
-    question_sets: question_sets.QuestionSetsResource
     files: files.FilesResource
     models: models.ModelsResource
+    datasets: datasets.DatasetsResource
+    evaluations: evaluations.EvaluationsResource
     with_raw_response: SGPClientWithRawResponse
     with_streaming_response: SGPClientWithStreamedResponse

@@ -157,10 +157,10 @@ class SGPClient(SyncAPIClient):
         self.completions = completions.CompletionsResource(self)
         self.chat = chat.ChatResource(self)
         self.inference = inference.InferenceResource(self)
-        self.questions = questions.QuestionsResource(self)
-        self.question_sets = question_sets.QuestionSetsResource(self)
         self.files = files.FilesResource(self)
         self.models = models.ModelsResource(self)
+        self.datasets = datasets.DatasetsResource(self)
+        self.evaluations = evaluations.EvaluationsResource(self)
         self.with_raw_response = SGPClientWithRawResponse(self)
         self.with_streaming_response = SGPClientWithStreamedResponse(self)

@@ -278,10 +278,10 @@ class AsyncSGPClient(AsyncAPIClient):
     completions: completions.AsyncCompletionsResource
     chat: chat.AsyncChatResource
     inference: inference.AsyncInferenceResource
-    questions: questions.AsyncQuestionsResource
-    question_sets: question_sets.AsyncQuestionSetsResource
     files: files.AsyncFilesResource
     models: models.AsyncModelsResource
+    datasets: datasets.AsyncDatasetsResource
+    evaluations: evaluations.AsyncEvaluationsResource
     with_raw_response: AsyncSGPClientWithRawResponse
     with_streaming_response: AsyncSGPClientWithStreamedResponse

@@ -378,10 +378,10 @@ class AsyncSGPClient(AsyncAPIClient):
         self.completions = completions.AsyncCompletionsResource(self)
         self.chat = chat.AsyncChatResource(self)
         self.inference = inference.AsyncInferenceResource(self)
-        self.questions = questions.AsyncQuestionsResource(self)
-        self.question_sets = question_sets.AsyncQuestionSetsResource(self)
         self.files = files.AsyncFilesResource(self)
         self.models = models.AsyncModelsResource(self)
+        self.datasets = datasets.AsyncDatasetsResource(self)
+        self.evaluations = evaluations.AsyncEvaluationsResource(self)
         self.with_raw_response = AsyncSGPClientWithRawResponse(self)
         self.with_streaming_response = AsyncSGPClientWithStreamedResponse(self)

@@ -500,10 +500,10 @@ class SGPClientWithRawResponse:
         self.completions = completions.CompletionsResourceWithRawResponse(client.completions)
         self.chat = chat.ChatResourceWithRawResponse(client.chat)
         self.inference = inference.InferenceResourceWithRawResponse(client.inference)
-        self.questions = questions.QuestionsResourceWithRawResponse(client.questions)
-        self.question_sets = question_sets.QuestionSetsResourceWithRawResponse(client.question_sets)
         self.files = files.FilesResourceWithRawResponse(client.files)
         self.models = models.ModelsResourceWithRawResponse(client.models)
+        self.datasets = datasets.DatasetsResourceWithRawResponse(client.datasets)
+        self.evaluations = evaluations.EvaluationsResourceWithRawResponse(client.evaluations)


 class AsyncSGPClientWithRawResponse:
@@ -511,10 +511,10 @@ class AsyncSGPClientWithRawResponse:
         self.completions = completions.AsyncCompletionsResourceWithRawResponse(client.completions)
         self.chat = chat.AsyncChatResourceWithRawResponse(client.chat)
         self.inference = inference.AsyncInferenceResourceWithRawResponse(client.inference)
-        self.questions = questions.AsyncQuestionsResourceWithRawResponse(client.questions)
-        self.question_sets = question_sets.AsyncQuestionSetsResourceWithRawResponse(client.question_sets)
         self.files = files.AsyncFilesResourceWithRawResponse(client.files)
         self.models = models.AsyncModelsResourceWithRawResponse(client.models)
+        self.datasets = datasets.AsyncDatasetsResourceWithRawResponse(client.datasets)
+        self.evaluations = evaluations.AsyncEvaluationsResourceWithRawResponse(client.evaluations)


 class SGPClientWithStreamedResponse:
@@ -522,10 +522,10 @@ class SGPClientWithStreamedResponse:
         self.completions = completions.CompletionsResourceWithStreamingResponse(client.completions)
         self.chat = chat.ChatResourceWithStreamingResponse(client.chat)
         self.inference = inference.InferenceResourceWithStreamingResponse(client.inference)
-        self.questions = questions.QuestionsResourceWithStreamingResponse(client.questions)
-        self.question_sets = question_sets.QuestionSetsResourceWithStreamingResponse(client.question_sets)
         self.files = files.FilesResourceWithStreamingResponse(client.files)
         self.models = models.ModelsResourceWithStreamingResponse(client.models)
+        self.datasets = datasets.DatasetsResourceWithStreamingResponse(client.datasets)
+        self.evaluations = evaluations.EvaluationsResourceWithStreamingResponse(client.evaluations)


 class AsyncSGPClientWithStreamedResponse:
@@ -533,10 +533,10 @@ class AsyncSGPClientWithStreamedResponse:
         self.completions = completions.AsyncCompletionsResourceWithStreamingResponse(client.completions)
         self.chat = chat.AsyncChatResourceWithStreamingResponse(client.chat)
         self.inference = inference.AsyncInferenceResourceWithStreamingResponse(client.inference)
-        self.questions = questions.AsyncQuestionsResourceWithStreamingResponse(client.questions)
-        self.question_sets = question_sets.AsyncQuestionSetsResourceWithStreamingResponse(client.question_sets)
         self.files = files.AsyncFilesResourceWithStreamingResponse(client.files)
         self.models = models.AsyncModelsResourceWithStreamingResponse(client.models)
+        self.datasets = datasets.AsyncDatasetsResourceWithStreamingResponse(client.datasets)
+        self.evaluations = evaluations.AsyncEvaluationsResourceWithStreamingResponse(client.evaluations)


 Client = SGPClient
scale_gp_beta/_models.py CHANGED
@@ -65,7 +65,7 @@ from ._compat import (
 from ._constants import RAW_RESPONSE_HEADER

 if TYPE_CHECKING:
-    from pydantic_core.core_schema import ModelField, LiteralSchema, ModelFieldsSchema
+    from pydantic_core.core_schema import ModelField, ModelSchema, LiteralSchema, ModelFieldsSchema

 __all__ = ["BaseModel", "GenericModel"]

@@ -646,15 +646,18 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any,

 def _extract_field_schema_pv2(model: type[BaseModel], field_name: str) -> ModelField | None:
     schema = model.__pydantic_core_schema__
+    if schema["type"] == "definitions":
+        schema = schema["schema"]
+
     if schema["type"] != "model":
         return None

+    schema = cast("ModelSchema", schema)
     fields_schema = schema["schema"]
     if fields_schema["type"] != "model-fields":
         return None

     fields_schema = cast("ModelFieldsSchema", fields_schema)
-
     field = fields_schema["fields"].get(field_name)
     if not field:
         return None
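The `_models.py` change teaches `_extract_field_schema_pv2` to unwrap a top-level `definitions` core schema (emitted by pydantic v2 when a model's schema carries shared or recursive definitions) before descending into the `model` and `model-fields` layers. A standalone sketch of the same lookup, independent of the SDK internals (the helper name is illustrative):

```python
from typing import Any

import pydantic


def field_core_schema(model: type[pydantic.BaseModel], field_name: str) -> Any:
    """Peel an optional `definitions` wrapper, then walk model -> model-fields -> fields."""
    schema: Any = model.__pydantic_core_schema__
    if schema["type"] == "definitions":
        schema = schema["schema"]  # the wrapped schema; may still not be a plain `model`
    if schema["type"] != "model":
        return None
    fields_schema = schema["schema"]
    if fields_schema["type"] != "model-fields":
        return None
    return fields_schema["fields"].get(field_name)
```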
scale_gp_beta/_version.py CHANGED
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 __title__ = "scale_gp_beta"
-__version__ = "0.1.0-alpha.3"  # x-release-please-version
+__version__ = "0.1.0-alpha.5"  # x-release-please-version
scale_gp_beta/resources/__init__.py CHANGED
@@ -24,6 +24,14 @@ from .models import (
     ModelsResourceWithStreamingResponse,
     AsyncModelsResourceWithStreamingResponse,
 )
+from .datasets import (
+    DatasetsResource,
+    AsyncDatasetsResource,
+    DatasetsResourceWithRawResponse,
+    AsyncDatasetsResourceWithRawResponse,
+    DatasetsResourceWithStreamingResponse,
+    AsyncDatasetsResourceWithStreamingResponse,
+)
 from .inference import (
     InferenceResource,
     AsyncInferenceResource,
@@ -32,14 +40,6 @@ from .inference import (
     InferenceResourceWithStreamingResponse,
     AsyncInferenceResourceWithStreamingResponse,
 )
-from .questions import (
-    QuestionsResource,
-    AsyncQuestionsResource,
-    QuestionsResourceWithRawResponse,
-    AsyncQuestionsResourceWithRawResponse,
-    QuestionsResourceWithStreamingResponse,
-    AsyncQuestionsResourceWithStreamingResponse,
-)
 from .completions import (
     CompletionsResource,
     AsyncCompletionsResource,
@@ -48,13 +48,13 @@ from .completions import (
     CompletionsResourceWithStreamingResponse,
     AsyncCompletionsResourceWithStreamingResponse,
 )
-from .question_sets import (
-    QuestionSetsResource,
-    AsyncQuestionSetsResource,
-    QuestionSetsResourceWithRawResponse,
-    AsyncQuestionSetsResourceWithRawResponse,
-    QuestionSetsResourceWithStreamingResponse,
-    AsyncQuestionSetsResourceWithStreamingResponse,
+from .evaluations import (
+    EvaluationsResource,
+    AsyncEvaluationsResource,
+    EvaluationsResourceWithRawResponse,
+    AsyncEvaluationsResourceWithRawResponse,
+    EvaluationsResourceWithStreamingResponse,
+    AsyncEvaluationsResourceWithStreamingResponse,
 )

 __all__ = [
@@ -76,18 +76,6 @@ __all__ = [
     "AsyncInferenceResourceWithRawResponse",
     "InferenceResourceWithStreamingResponse",
     "AsyncInferenceResourceWithStreamingResponse",
-    "QuestionsResource",
-    "AsyncQuestionsResource",
-    "QuestionsResourceWithRawResponse",
-    "AsyncQuestionsResourceWithRawResponse",
-    "QuestionsResourceWithStreamingResponse",
-    "AsyncQuestionsResourceWithStreamingResponse",
-    "QuestionSetsResource",
-    "AsyncQuestionSetsResource",
-    "QuestionSetsResourceWithRawResponse",
-    "AsyncQuestionSetsResourceWithRawResponse",
-    "QuestionSetsResourceWithStreamingResponse",
-    "AsyncQuestionSetsResourceWithStreamingResponse",
     "FilesResource",
     "AsyncFilesResource",
     "FilesResourceWithRawResponse",
@@ -100,4 +88,16 @@ __all__ = [
     "AsyncModelsResourceWithRawResponse",
     "ModelsResourceWithStreamingResponse",
     "AsyncModelsResourceWithStreamingResponse",
+    "DatasetsResource",
+    "AsyncDatasetsResource",
+    "DatasetsResourceWithRawResponse",
+    "AsyncDatasetsResourceWithRawResponse",
+    "DatasetsResourceWithStreamingResponse",
+    "AsyncDatasetsResourceWithStreamingResponse",
+    "EvaluationsResource",
+    "AsyncEvaluationsResource",
+    "EvaluationsResourceWithRawResponse",
+    "AsyncEvaluationsResourceWithRawResponse",
+    "EvaluationsResourceWithStreamingResponse",
+    "AsyncEvaluationsResourceWithStreamingResponse",
 ]
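Code that imports resource classes directly from `scale_gp_beta.resources` follows the same rename; the question and question-set classes are no longer exported:

```python
# 0.1.0a3
# from scale_gp_beta.resources import QuestionsResource, QuestionSetsResource

# 0.1.0a5
from scale_gp_beta.resources import DatasetsResource, EvaluationsResource
```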
scale_gp_beta/resources/chat/completions.py CHANGED
@@ -474,53 +474,50 @@ class CompletionsResource(SyncAPIResource):
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]:
-        return cast(
-            CompletionCreateResponse,
-            self._post(
-                "/v5/chat/completions",
-                body=maybe_transform(
-                    {
-                        "messages": messages,
-                        "model": model,
-                        "audio": audio,
-                        "frequency_penalty": frequency_penalty,
-                        "function_call": function_call,
-                        "functions": functions,
-                        "logit_bias": logit_bias,
-                        "logprobs": logprobs,
-                        "max_completion_tokens": max_completion_tokens,
-                        "max_tokens": max_tokens,
-                        "metadata": metadata,
-                        "modalities": modalities,
-                        "n": n,
-                        "parallel_tool_calls": parallel_tool_calls,
-                        "prediction": prediction,
-                        "presence_penalty": presence_penalty,
-                        "reasoning_effort": reasoning_effort,
-                        "response_format": response_format,
-                        "seed": seed,
-                        "stop": stop,
-                        "store": store,
-                        "stream": stream,
-                        "stream_options": stream_options,
-                        "temperature": temperature,
-                        "tool_choice": tool_choice,
-                        "tools": tools,
-                        "top_k": top_k,
-                        "top_logprobs": top_logprobs,
-                        "top_p": top_p,
-                    },
-                    completion_create_params.CompletionCreateParams,
-                ),
-                options=make_request_options(
-                    extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-                ),
-                cast_to=cast(
-                    Any, CompletionCreateResponse
-                ),  # Union types cannot be passed in as arguments in the type system
-                stream=stream or False,
-                stream_cls=Stream[ChatCompletionChunk],
+        return self._post(
+            "/v5/chat/completions",
+            body=maybe_transform(
+                {
+                    "messages": messages,
+                    "model": model,
+                    "audio": audio,
+                    "frequency_penalty": frequency_penalty,
+                    "function_call": function_call,
+                    "functions": functions,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_tokens": max_tokens,
+                    "metadata": metadata,
+                    "modalities": modalities,
+                    "n": n,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "prediction": prediction,
+                    "presence_penalty": presence_penalty,
+                    "reasoning_effort": reasoning_effort,
+                    "response_format": response_format,
+                    "seed": seed,
+                    "stop": stop,
+                    "store": store,
+                    "stream": stream,
+                    "stream_options": stream_options,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_k": top_k,
+                    "top_logprobs": top_logprobs,
+                    "top_p": top_p,
+                },
+                completion_create_params.CompletionCreateParams,
             ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=cast(
+                Any, CompletionCreateResponse
+            ),  # Union types cannot be passed in as arguments in the type system
+            stream=stream or False,
+            stream_cls=Stream[ChatCompletionChunk],
         )


@@ -968,53 +965,50 @@ class AsyncCompletionsResource(AsyncAPIResource):
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]:
-        return cast(
-            CompletionCreateResponse,
-            await self._post(
-                "/v5/chat/completions",
-                body=await async_maybe_transform(
-                    {
-                        "messages": messages,
-                        "model": model,
-                        "audio": audio,
-                        "frequency_penalty": frequency_penalty,
-                        "function_call": function_call,
-                        "functions": functions,
-                        "logit_bias": logit_bias,
-                        "logprobs": logprobs,
-                        "max_completion_tokens": max_completion_tokens,
-                        "max_tokens": max_tokens,
-                        "metadata": metadata,
-                        "modalities": modalities,
-                        "n": n,
-                        "parallel_tool_calls": parallel_tool_calls,
-                        "prediction": prediction,
-                        "presence_penalty": presence_penalty,
-                        "reasoning_effort": reasoning_effort,
-                        "response_format": response_format,
-                        "seed": seed,
-                        "stop": stop,
-                        "store": store,
-                        "stream": stream,
-                        "stream_options": stream_options,
-                        "temperature": temperature,
-                        "tool_choice": tool_choice,
-                        "tools": tools,
-                        "top_k": top_k,
-                        "top_logprobs": top_logprobs,
-                        "top_p": top_p,
-                    },
-                    completion_create_params.CompletionCreateParams,
-                ),
-                options=make_request_options(
-                    extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-                ),
-                cast_to=cast(
-                    Any, CompletionCreateResponse
-                ),  # Union types cannot be passed in as arguments in the type system
-                stream=stream or False,
-                stream_cls=AsyncStream[ChatCompletionChunk],
+        return await self._post(
+            "/v5/chat/completions",
+            body=await async_maybe_transform(
+                {
+                    "messages": messages,
+                    "model": model,
+                    "audio": audio,
+                    "frequency_penalty": frequency_penalty,
+                    "function_call": function_call,
+                    "functions": functions,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_tokens": max_tokens,
+                    "metadata": metadata,
+                    "modalities": modalities,
+                    "n": n,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "prediction": prediction,
+                    "presence_penalty": presence_penalty,
+                    "reasoning_effort": reasoning_effort,
+                    "response_format": response_format,
+                    "seed": seed,
+                    "stop": stop,
+                    "store": store,
+                    "stream": stream,
+                    "stream_options": stream_options,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_k": top_k,
+                    "top_logprobs": top_logprobs,
+                    "top_p": top_p,
+                },
+                completion_create_params.CompletionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
+            cast_to=cast(
+                Any, CompletionCreateResponse
+            ),  # Union types cannot be passed in as arguments in the type system
+            stream=stream or False,
+            stream_cls=AsyncStream[ChatCompletionChunk],
         )

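The chat completions change removes the outer `cast(CompletionCreateResponse, ...)` wrapper around `self._post(...)`, so `create()` returns the post result directly and the declared `CompletionCreateResponse | Stream[ChatCompletionChunk]` (or `AsyncStream[...]`) union is no longer collapsed for type checkers; the request body and routing are unchanged. A hedged usage sketch of both branches (model identifier and message shape are illustrative):

```python
from scale_gp_beta import SGPClient

client = SGPClient()

# Non-streaming: resolves to a CompletionCreateResponse at runtime.
completion = client.chat.completions.create(
    model="my-model-deployment",  # illustrative model identifier
    messages=[{"role": "user", "content": "Say hello"}],
)

# Streaming: with stream=True the same call yields Stream[ChatCompletionChunk].
stream = client.chat.completions.create(
    model="my-model-deployment",
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,
)
for chunk in stream:
    ...  # each item is a ChatCompletionChunk
```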