scale-gp-beta 0.1.0a6__py3-none-any.whl → 0.1.0a8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
scale_gp_beta/_models.py CHANGED
@@ -681,7 +681,7 @@ def set_pydantic_config(typ: Any, config: pydantic.ConfigDict) -> None:
     setattr(typ, "__pydantic_config__", config)  # noqa: B010


-# our use of subclasssing here causes weirdness for type checkers,
+# our use of subclassing here causes weirdness for type checkers,
 # so we just pretend that we don't subclass
 if TYPE_CHECKING:
     GenericModel = BaseModel
scale_gp_beta/_utils/_transform.py CHANGED
@@ -126,7 +126,7 @@ def _get_annotated_type(type_: type) -> type | None:
 def _maybe_transform_key(key: str, type_: type) -> str:
     """Transform the given `data` based on the annotations provided in `type_`.

-    Note: this function only looks at `Annotated` types that contain `PropertInfo` metadata.
+    Note: this function only looks at `Annotated` types that contain `PropertyInfo` metadata.
     """
     annotated_type = _get_annotated_type(type_)
     if annotated_type is None:
scale_gp_beta/_version.py CHANGED
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 __title__ = "scale_gp_beta"
-__version__ = "0.1.0-alpha.6"  # x-release-please-version
+__version__ = "0.1.0-alpha.8"  # x-release-please-version
scale_gp_beta/resources/evaluations.py CHANGED
@@ -25,6 +25,7 @@ from .._response import (
 from ..pagination import SyncCursorPage, AsyncCursorPage
 from .._base_client import AsyncPaginator, make_request_options
 from ..types.evaluation import Evaluation
+from ..types.evaluation_task_param import EvaluationTaskParam
 from ..types.evaluation_archive_response import EvaluationArchiveResponse

 __all__ = ["EvaluationsResource", "AsyncEvaluationsResource"]
@@ -57,7 +58,7 @@ class EvaluationsResource(SyncAPIResource):
         data: Iterable[Dict[str, object]],
         name: str,
         description: str | NotGiven = NOT_GIVEN,
-        tasks: Iterable[evaluation_create_params.EvaluationStandaloneCreateRequestTask] | NotGiven = NOT_GIVEN,
+        tasks: Iterable[EvaluationTaskParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -87,11 +88,11 @@ class EvaluationsResource(SyncAPIResource):
     def create(
         self,
         *,
-        data: Iterable[evaluation_create_params.EvaluationFromDatasetCreateRequestData],
         dataset_id: str,
         name: str,
+        data: Iterable[evaluation_create_params.EvaluationFromDatasetCreateRequestData] | NotGiven = NOT_GIVEN,
         description: str | NotGiven = NOT_GIVEN,
-        tasks: Iterable[evaluation_create_params.EvaluationFromDatasetCreateRequestTask] | NotGiven = NOT_GIVEN,
+        tasks: Iterable[EvaluationTaskParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -103,10 +104,10 @@ class EvaluationsResource(SyncAPIResource):
         Create Evaluation

         Args:
-          data: Items to be evaluated, including references to the input dataset items
-
           dataset_id: The ID of the dataset containing the items referenced by the `data` field

+          data: Items to be evaluated, including references to the input dataset items
+
           tasks: Tasks allow you to augment and evaluate your data

           extra_headers: Send extra headers
@@ -127,7 +128,7 @@ class EvaluationsResource(SyncAPIResource):
         dataset: evaluation_create_params.EvaluationWithDatasetCreateRequestDataset,
         name: str,
         description: str | NotGiven = NOT_GIVEN,
-        tasks: Iterable[evaluation_create_params.EvaluationWithDatasetCreateRequestTask] | NotGiven = NOT_GIVEN,
+        tasks: Iterable[EvaluationTaskParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -155,14 +156,16 @@ class EvaluationsResource(SyncAPIResource):
         """
         ...

-    @required_args(["data", "name"], ["data", "dataset_id", "name"], ["data", "dataset", "name"])
+    @required_args(["data", "name"], ["dataset_id", "name"], ["data", "dataset", "name"])
     def create(
         self,
         *,
-        data: Iterable[Dict[str, object]] | Iterable[evaluation_create_params.EvaluationFromDatasetCreateRequestData],
+        data: Iterable[Dict[str, object]]
+        | Iterable[evaluation_create_params.EvaluationFromDatasetCreateRequestData]
+        | NotGiven = NOT_GIVEN,
         name: str,
         description: str | NotGiven = NOT_GIVEN,
-        tasks: Iterable[evaluation_create_params.EvaluationStandaloneCreateRequestTask] | NotGiven = NOT_GIVEN,
+        tasks: Iterable[EvaluationTaskParam] | NotGiven = NOT_GIVEN,
         dataset_id: str | NotGiven = NOT_GIVEN,
         dataset: evaluation_create_params.EvaluationWithDatasetCreateRequestDataset | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -339,7 +342,7 @@ class AsyncEvaluationsResource(AsyncAPIResource):
         data: Iterable[Dict[str, object]],
         name: str,
         description: str | NotGiven = NOT_GIVEN,
-        tasks: Iterable[evaluation_create_params.EvaluationStandaloneCreateRequestTask] | NotGiven = NOT_GIVEN,
+        tasks: Iterable[EvaluationTaskParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -369,11 +372,11 @@ class AsyncEvaluationsResource(AsyncAPIResource):
     async def create(
         self,
         *,
-        data: Iterable[evaluation_create_params.EvaluationFromDatasetCreateRequestData],
         dataset_id: str,
         name: str,
+        data: Iterable[evaluation_create_params.EvaluationFromDatasetCreateRequestData] | NotGiven = NOT_GIVEN,
         description: str | NotGiven = NOT_GIVEN,
-        tasks: Iterable[evaluation_create_params.EvaluationFromDatasetCreateRequestTask] | NotGiven = NOT_GIVEN,
+        tasks: Iterable[EvaluationTaskParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -385,10 +388,10 @@ class AsyncEvaluationsResource(AsyncAPIResource):
         Create Evaluation

         Args:
-          data: Items to be evaluated, including references to the input dataset items
-
           dataset_id: The ID of the dataset containing the items referenced by the `data` field

+          data: Items to be evaluated, including references to the input dataset items
+
           tasks: Tasks allow you to augment and evaluate your data

           extra_headers: Send extra headers
@@ -409,7 +412,7 @@ class AsyncEvaluationsResource(AsyncAPIResource):
         dataset: evaluation_create_params.EvaluationWithDatasetCreateRequestDataset,
         name: str,
         description: str | NotGiven = NOT_GIVEN,
-        tasks: Iterable[evaluation_create_params.EvaluationWithDatasetCreateRequestTask] | NotGiven = NOT_GIVEN,
+        tasks: Iterable[EvaluationTaskParam] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -437,14 +440,16 @@ class AsyncEvaluationsResource(AsyncAPIResource):
         """
         ...

-    @required_args(["data", "name"], ["data", "dataset_id", "name"], ["data", "dataset", "name"])
+    @required_args(["data", "name"], ["dataset_id", "name"], ["data", "dataset", "name"])
     async def create(
         self,
         *,
-        data: Iterable[Dict[str, object]] | Iterable[evaluation_create_params.EvaluationFromDatasetCreateRequestData],
+        data: Iterable[Dict[str, object]]
+        | Iterable[evaluation_create_params.EvaluationFromDatasetCreateRequestData]
+        | NotGiven = NOT_GIVEN,
         name: str,
         description: str | NotGiven = NOT_GIVEN,
-        tasks: Iterable[evaluation_create_params.EvaluationStandaloneCreateRequestTask] | NotGiven = NOT_GIVEN,
+        tasks: Iterable[EvaluationTaskParam] | NotGiven = NOT_GIVEN,
         dataset_id: str | NotGiven = NOT_GIVEN,
         dataset: evaluation_create_params.EvaluationWithDatasetCreateRequestDataset | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
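
With these changes, the dataset-backed overload of create() no longer requires data (the @required_args check now accepts ["dataset_id", "name"] on its own), and every overload takes the shared EvaluationTaskParam union for tasks. Below is a minimal sketch of a call under the new signature; the client class name (ScaleGPBeta) and the evaluations attribute are assumptions for illustration, not confirmed by this diff.

from scale_gp_beta import ScaleGPBeta  # assumed top-level client export

client = ScaleGPBeta()  # typical Stainless clients read credentials from the environment

# `data` may now be omitted when `dataset_id` is supplied; `tasks` uses the
# shared EvaluationTaskParam shape (see evaluation_task_param.py below).
evaluation = client.evaluations.create(
    name="weekly-regression",
    dataset_id="dataset_abc123",
    tasks=[
        {
            "task_type": "chat_completion",
            "configuration": {
                "model": "gpt-4o-mini",
                "messages": [{"role": "user", "content": "Summarize the item."}],
            },
        }
    ],
)
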
scale_gp_beta/types/__init__.py CHANGED
@@ -22,6 +22,7 @@ from .file_delete_response import FileDeleteResponse as FileDeleteResponse
 from .inference_model_list import InferenceModelList as InferenceModelList
 from .dataset_create_params import DatasetCreateParams as DatasetCreateParams
 from .dataset_update_params import DatasetUpdateParams as DatasetUpdateParams
+from .evaluation_task_param import EvaluationTaskParam as EvaluationTaskParam
 from .model_delete_response import ModelDeleteResponse as ModelDeleteResponse
 from .evaluation_list_params import EvaluationListParams as EvaluationListParams
 from .dataset_delete_response import DatasetDeleteResponse as DatasetDeleteResponse
scale_gp_beta/types/chat/chat_completion.py CHANGED
@@ -10,6 +10,8 @@ __all__ = [
     "ChatCompletion",
     "Choice",
    "ChoiceMessage",
+    "ChoiceMessageAnnotation",
+    "ChoiceMessageAnnotationURLCitation",
     "ChoiceMessageAudio",
     "ChoiceMessageFunctionCall",
     "ChoiceMessageToolCall",
@@ -25,6 +27,34 @@ __all__ = [
 ]


+class ChoiceMessageAnnotationURLCitation(BaseModel):
+    end_index: int
+
+    start_index: int
+
+    title: str
+
+    url: str
+
+    if TYPE_CHECKING:
+        # Stub to indicate that arbitrary properties are accepted.
+        # To access properties that are not valid identifiers you can use `getattr`, e.g.
+        # `getattr(obj, '$type')`
+        def __getattr__(self, attr: str) -> object: ...
+
+
+class ChoiceMessageAnnotation(BaseModel):
+    type: Literal["url_citation"]
+
+    url_citation: ChoiceMessageAnnotationURLCitation
+
+    if TYPE_CHECKING:
+        # Stub to indicate that arbitrary properties are accepted.
+        # To access properties that are not valid identifiers you can use `getattr`, e.g.
+        # `getattr(obj, '$type')`
+        def __getattr__(self, attr: str) -> object: ...
+
+
 class ChoiceMessageAudio(BaseModel):
     id: str

@@ -82,6 +112,8 @@ class ChoiceMessageToolCall(BaseModel):
 class ChoiceMessage(BaseModel):
     role: Literal["assistant"]

+    annotations: Optional[List[ChoiceMessageAnnotation]] = None
+
     audio: Optional[ChoiceMessageAudio] = None

     content: Optional[str] = None
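
The new annotations field exposes URL citations attached to assistant messages. A short sketch of consuming it, assuming completion is a ChatCompletion parsed with the models above (how the response was obtained is immaterial here):

message = completion.choices[0].message

for annotation in message.annotations or []:
    if annotation.type == "url_citation":
        cite = annotation.url_citation
        # start_index/end_index delimit the cited span within message.content
        print(f"{cite.title} <{cite.url}> [{cite.start_index}:{cite.end_index}]")
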
scale_gp_beta/types/chat/chat_completion_chunk.py CHANGED
@@ -71,7 +71,7 @@ class ChoiceDelta(BaseModel):

     refusal: Optional[str] = None

-    role: Optional[Literal["system", "user", "assistant", "tool"]] = None
+    role: Optional[Literal["developer", "system", "user", "assistant", "tool"]] = None

     tool_calls: Optional[List[ChoiceDeltaToolCall]] = None

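
Streamed deltas can now carry the "developer" role, so code that branches on delta.role should account for it. A tiny illustrative loop (the stream variable and handling policy are assumptions, not part of this diff):

for chunk in stream:  # assumed to yield ChatCompletionChunk objects
    delta = chunk.choices[0].delta
    if delta.role == "developer":
        # e.g. treat developer turns like system-level instructions, or skip them
        continue
    if delta.content:
        print(delta.content, end="")
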
@@ -3,57 +3,17 @@
3
3
  from __future__ import annotations
4
4
 
5
5
  from typing import Dict, List, Union, Iterable
6
- from typing_extensions import Literal, Required, TypeAlias, TypedDict
6
+ from typing_extensions import Required, TypeAlias, TypedDict
7
+
8
+ from .evaluation_task_param import EvaluationTaskParam
7
9
 
8
10
  __all__ = [
9
11
  "EvaluationCreateParams",
10
12
  "EvaluationStandaloneCreateRequest",
11
- "EvaluationStandaloneCreateRequestTask",
12
- "EvaluationStandaloneCreateRequestTaskChatCompletionEvaluationTaskRequest",
13
- "EvaluationStandaloneCreateRequestTaskChatCompletionEvaluationTaskRequestConfiguration",
14
- "EvaluationStandaloneCreateRequestTaskGenericInferenceEvaluationTaskRequest",
15
- "EvaluationStandaloneCreateRequestTaskGenericInferenceEvaluationTaskRequestConfiguration",
16
- "EvaluationStandaloneCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfiguration",
17
- "EvaluationStandaloneCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfigurationLaunchInferenceConfiguration",
18
- "EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequest",
19
- "EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfiguration",
20
- "EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationHistoryUnionMember0",
21
- "EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverrides",
22
- "EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverrides",
23
- "EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesInitialState",
24
- "EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesPartialTrace",
25
13
  "EvaluationFromDatasetCreateRequest",
26
14
  "EvaluationFromDatasetCreateRequestData",
27
- "EvaluationFromDatasetCreateRequestTask",
28
- "EvaluationFromDatasetCreateRequestTaskChatCompletionEvaluationTaskRequest",
29
- "EvaluationFromDatasetCreateRequestTaskChatCompletionEvaluationTaskRequestConfiguration",
30
- "EvaluationFromDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequest",
31
- "EvaluationFromDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfiguration",
32
- "EvaluationFromDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfiguration",
33
- "EvaluationFromDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfigurationLaunchInferenceConfiguration",
34
- "EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequest",
35
- "EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfiguration",
36
- "EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationHistoryUnionMember0",
37
- "EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverrides",
38
- "EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverrides",
39
- "EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesInitialState",
40
- "EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesPartialTrace",
41
15
  "EvaluationWithDatasetCreateRequest",
42
16
  "EvaluationWithDatasetCreateRequestDataset",
43
- "EvaluationWithDatasetCreateRequestTask",
44
- "EvaluationWithDatasetCreateRequestTaskChatCompletionEvaluationTaskRequest",
45
- "EvaluationWithDatasetCreateRequestTaskChatCompletionEvaluationTaskRequestConfiguration",
46
- "EvaluationWithDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequest",
47
- "EvaluationWithDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfiguration",
48
- "EvaluationWithDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfiguration",
49
- "EvaluationWithDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfigurationLaunchInferenceConfiguration",
50
- "EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequest",
51
- "EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfiguration",
52
- "EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationHistoryUnionMember0",
53
- "EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverrides",
54
- "EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverrides",
55
- "EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesInitialState",
56
- "EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesPartialTrace",
57
17
  ]
58
18
 
59
19
 
@@ -65,224 +25,22 @@ class EvaluationStandaloneCreateRequest(TypedDict, total=False):
65
25
 
66
26
  description: str
67
27
 
68
- tasks: Iterable[EvaluationStandaloneCreateRequestTask]
28
+ tasks: Iterable[EvaluationTaskParam]
69
29
  """Tasks allow you to augment and evaluate your data"""
70
30
 
71
31
 
72
- class EvaluationStandaloneCreateRequestTaskChatCompletionEvaluationTaskRequestConfigurationTyped(
73
- TypedDict, total=False
74
- ):
75
- messages: Required[Union[Iterable[Dict[str, object]], str]]
76
-
77
- model: Required[str]
78
-
79
- audio: Union[Dict[str, object], str]
80
-
81
- frequency_penalty: Union[float, str]
82
-
83
- function_call: Union[Dict[str, object], str]
84
-
85
- functions: Union[Iterable[Dict[str, object]], str]
86
-
87
- logit_bias: Union[Dict[str, int], str]
88
-
89
- logprobs: Union[bool, str]
90
-
91
- max_completion_tokens: Union[int, str]
92
-
93
- max_tokens: Union[int, str]
94
-
95
- metadata: Union[Dict[str, str], str]
96
-
97
- modalities: Union[List[str], str]
98
-
99
- n: Union[int, str]
100
-
101
- parallel_tool_calls: Union[bool, str]
102
-
103
- prediction: Union[Dict[str, object], str]
104
-
105
- presence_penalty: Union[float, str]
106
-
107
- reasoning_effort: str
108
-
109
- response_format: Union[Dict[str, object], str]
110
-
111
- seed: Union[int, str]
112
-
113
- stop: str
114
-
115
- store: Union[bool, str]
116
-
117
- temperature: Union[float, str]
118
-
119
- tool_choice: str
120
-
121
- tools: Union[Iterable[Dict[str, object]], str]
122
-
123
- top_k: Union[int, str]
124
-
125
- top_logprobs: Union[int, str]
126
-
127
- top_p: Union[float, str]
128
-
129
-
130
- EvaluationStandaloneCreateRequestTaskChatCompletionEvaluationTaskRequestConfiguration: TypeAlias = Union[
131
- EvaluationStandaloneCreateRequestTaskChatCompletionEvaluationTaskRequestConfigurationTyped, Dict[str, object]
132
- ]
133
-
134
-
135
- class EvaluationStandaloneCreateRequestTaskChatCompletionEvaluationTaskRequest(TypedDict, total=False):
136
- configuration: Required[EvaluationStandaloneCreateRequestTaskChatCompletionEvaluationTaskRequestConfiguration]
137
-
138
- alias: str
139
- """Alias to title the results column. Defaults to the `task_type`"""
140
-
141
- task_type: Literal["chat_completion"]
142
-
143
-
144
- class EvaluationStandaloneCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfigurationLaunchInferenceConfiguration(
145
- TypedDict, total=False
146
- ):
147
- num_retries: int
148
-
149
- timeout_seconds: int
150
-
151
-
152
- EvaluationStandaloneCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfiguration: TypeAlias = Union[
153
- EvaluationStandaloneCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfigurationLaunchInferenceConfiguration,
154
- str,
155
- ]
156
-
157
-
158
- class EvaluationStandaloneCreateRequestTaskGenericInferenceEvaluationTaskRequestConfiguration(TypedDict, total=False):
159
- model: Required[str]
160
-
161
- args: Union[Dict[str, object], str]
162
-
163
- inference_configuration: (
164
- EvaluationStandaloneCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfiguration
165
- )
166
-
167
-
168
- class EvaluationStandaloneCreateRequestTaskGenericInferenceEvaluationTaskRequest(TypedDict, total=False):
169
- configuration: Required[EvaluationStandaloneCreateRequestTaskGenericInferenceEvaluationTaskRequestConfiguration]
170
-
171
- alias: str
172
- """Alias to title the results column. Defaults to the `task_type`"""
173
-
174
- task_type: Literal["inference"]
175
-
176
-
177
- class EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationHistoryUnionMember0(
178
- TypedDict, total=False
179
- ):
180
- request: Required[str]
181
- """Request inputs"""
182
-
183
- response: Required[str]
184
- """Response outputs"""
185
-
186
- session_data: Dict[str, object]
187
- """Session data corresponding to the request response pair"""
188
-
189
-
190
- class EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesInitialState(
191
- TypedDict, total=False
192
- ):
193
- current_node: Required[str]
194
-
195
- state: Required[Dict[str, object]]
196
-
197
-
198
- class EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesPartialTrace(
199
- TypedDict, total=False
200
- ):
201
- duration_ms: Required[int]
202
-
203
- node_id: Required[str]
204
-
205
- operation_input: Required[str]
206
-
207
- operation_output: Required[str]
208
-
209
- operation_type: Required[str]
210
-
211
- start_timestamp: Required[str]
212
-
213
- workflow_id: Required[str]
214
-
215
- operation_metadata: Dict[str, object]
216
-
217
-
218
- class EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverrides(
219
- TypedDict, total=False
220
- ):
221
- concurrent: bool
222
-
223
- initial_state: EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesInitialState
224
-
225
- partial_trace: Iterable[
226
- EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesPartialTrace
227
- ]
228
-
229
- use_channels: bool
230
-
231
-
232
- EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverrides: TypeAlias = Union[
233
- EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverrides,
234
- str,
235
- ]
236
-
237
-
238
- class EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfiguration(
239
- TypedDict, total=False
240
- ):
241
- application_variant_id: Required[str]
242
-
243
- inputs: Required[Union[Dict[str, object], str]]
244
-
245
- history: Union[
246
- Iterable[
247
- EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationHistoryUnionMember0
248
- ],
249
- str,
250
- ]
251
-
252
- operation_metadata: Union[Dict[str, object], str]
253
-
254
- overrides: EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverrides
255
- """Execution override options for agentic applications"""
256
-
257
-
258
- class EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequest(TypedDict, total=False):
259
- configuration: Required[EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfiguration]
260
-
261
- alias: str
262
- """Alias to title the results column. Defaults to the `task_type`"""
263
-
264
- task_type: Literal["application_variant"]
265
-
266
-
267
- EvaluationStandaloneCreateRequestTask: TypeAlias = Union[
268
- EvaluationStandaloneCreateRequestTaskChatCompletionEvaluationTaskRequest,
269
- EvaluationStandaloneCreateRequestTaskGenericInferenceEvaluationTaskRequest,
270
- EvaluationStandaloneCreateRequestTaskApplicationVariantV1EvaluationTaskRequest,
271
- ]
272
-
273
-
274
32
  class EvaluationFromDatasetCreateRequest(TypedDict, total=False):
275
- data: Required[Iterable[EvaluationFromDatasetCreateRequestData]]
276
- """Items to be evaluated, including references to the input dataset items"""
277
-
278
33
  dataset_id: Required[str]
279
34
  """The ID of the dataset containing the items referenced by the `data` field"""
280
35
 
281
36
  name: Required[str]
282
37
 
38
+ data: Iterable[EvaluationFromDatasetCreateRequestData]
39
+ """Items to be evaluated, including references to the input dataset items"""
40
+
283
41
  description: str
284
42
 
285
- tasks: Iterable[EvaluationFromDatasetCreateRequestTask]
43
+ tasks: Iterable[EvaluationTaskParam]
286
44
  """Tasks allow you to augment and evaluate your data"""
287
45
 
288
46
 
@@ -295,210 +53,6 @@ EvaluationFromDatasetCreateRequestData: TypeAlias = Union[
295
53
  ]
296
54
 
297
55
 
298
- class EvaluationFromDatasetCreateRequestTaskChatCompletionEvaluationTaskRequestConfigurationTyped(
299
- TypedDict, total=False
300
- ):
301
- messages: Required[Union[Iterable[Dict[str, object]], str]]
302
-
303
- model: Required[str]
304
-
305
- audio: Union[Dict[str, object], str]
306
-
307
- frequency_penalty: Union[float, str]
308
-
309
- function_call: Union[Dict[str, object], str]
310
-
311
- functions: Union[Iterable[Dict[str, object]], str]
312
-
313
- logit_bias: Union[Dict[str, int], str]
314
-
315
- logprobs: Union[bool, str]
316
-
317
- max_completion_tokens: Union[int, str]
318
-
319
- max_tokens: Union[int, str]
320
-
321
- metadata: Union[Dict[str, str], str]
322
-
323
- modalities: Union[List[str], str]
324
-
325
- n: Union[int, str]
326
-
327
- parallel_tool_calls: Union[bool, str]
328
-
329
- prediction: Union[Dict[str, object], str]
330
-
331
- presence_penalty: Union[float, str]
332
-
333
- reasoning_effort: str
334
-
335
- response_format: Union[Dict[str, object], str]
336
-
337
- seed: Union[int, str]
338
-
339
- stop: str
340
-
341
- store: Union[bool, str]
342
-
343
- temperature: Union[float, str]
344
-
345
- tool_choice: str
346
-
347
- tools: Union[Iterable[Dict[str, object]], str]
348
-
349
- top_k: Union[int, str]
350
-
351
- top_logprobs: Union[int, str]
352
-
353
- top_p: Union[float, str]
354
-
355
-
356
- EvaluationFromDatasetCreateRequestTaskChatCompletionEvaluationTaskRequestConfiguration: TypeAlias = Union[
357
- EvaluationFromDatasetCreateRequestTaskChatCompletionEvaluationTaskRequestConfigurationTyped, Dict[str, object]
358
- ]
359
-
360
-
361
- class EvaluationFromDatasetCreateRequestTaskChatCompletionEvaluationTaskRequest(TypedDict, total=False):
362
- configuration: Required[EvaluationFromDatasetCreateRequestTaskChatCompletionEvaluationTaskRequestConfiguration]
363
-
364
- alias: str
365
- """Alias to title the results column. Defaults to the `task_type`"""
366
-
367
- task_type: Literal["chat_completion"]
368
-
369
-
370
- class EvaluationFromDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfigurationLaunchInferenceConfiguration(
371
- TypedDict, total=False
372
- ):
373
- num_retries: int
374
-
375
- timeout_seconds: int
376
-
377
-
378
- EvaluationFromDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfiguration: TypeAlias = Union[
379
- EvaluationFromDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfigurationLaunchInferenceConfiguration,
380
- str,
381
- ]
382
-
383
-
384
- class EvaluationFromDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfiguration(TypedDict, total=False):
385
- model: Required[str]
386
-
387
- args: Union[Dict[str, object], str]
388
-
389
- inference_configuration: (
390
- EvaluationFromDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfiguration
391
- )
392
-
393
-
394
- class EvaluationFromDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequest(TypedDict, total=False):
395
- configuration: Required[EvaluationFromDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfiguration]
396
-
397
- alias: str
398
- """Alias to title the results column. Defaults to the `task_type`"""
399
-
400
- task_type: Literal["inference"]
401
-
402
-
403
- class EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationHistoryUnionMember0(
404
- TypedDict, total=False
405
- ):
406
- request: Required[str]
407
- """Request inputs"""
408
-
409
- response: Required[str]
410
- """Response outputs"""
411
-
412
- session_data: Dict[str, object]
413
- """Session data corresponding to the request response pair"""
414
-
415
-
416
- class EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesInitialState(
417
- TypedDict, total=False
418
- ):
419
- current_node: Required[str]
420
-
421
- state: Required[Dict[str, object]]
422
-
423
-
424
- class EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesPartialTrace(
425
- TypedDict, total=False
426
- ):
427
- duration_ms: Required[int]
428
-
429
- node_id: Required[str]
430
-
431
- operation_input: Required[str]
432
-
433
- operation_output: Required[str]
434
-
435
- operation_type: Required[str]
436
-
437
- start_timestamp: Required[str]
438
-
439
- workflow_id: Required[str]
440
-
441
- operation_metadata: Dict[str, object]
442
-
443
-
444
- class EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverrides(
445
- TypedDict, total=False
446
- ):
447
- concurrent: bool
448
-
449
- initial_state: EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesInitialState
450
-
451
- partial_trace: Iterable[
452
- EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesPartialTrace
453
- ]
454
-
455
- use_channels: bool
456
-
457
-
458
- EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverrides: TypeAlias = Union[
459
- EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverrides,
460
- str,
461
- ]
462
-
463
-
464
- class EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfiguration(
465
- TypedDict, total=False
466
- ):
467
- application_variant_id: Required[str]
468
-
469
- inputs: Required[Union[Dict[str, object], str]]
470
-
471
- history: Union[
472
- Iterable[
473
- EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationHistoryUnionMember0
474
- ],
475
- str,
476
- ]
477
-
478
- operation_metadata: Union[Dict[str, object], str]
479
-
480
- overrides: EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverrides
481
- """Execution override options for agentic applications"""
482
-
483
-
484
- class EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequest(TypedDict, total=False):
485
- configuration: Required[
486
- EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfiguration
487
- ]
488
-
489
- alias: str
490
- """Alias to title the results column. Defaults to the `task_type`"""
491
-
492
- task_type: Literal["application_variant"]
493
-
494
-
495
- EvaluationFromDatasetCreateRequestTask: TypeAlias = Union[
496
- EvaluationFromDatasetCreateRequestTaskChatCompletionEvaluationTaskRequest,
497
- EvaluationFromDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequest,
498
- EvaluationFromDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequest,
499
- ]
500
-
501
-
502
56
  class EvaluationWithDatasetCreateRequest(TypedDict, total=False):
503
57
  data: Required[Iterable[Dict[str, object]]]
504
58
  """Items to be evaluated"""
@@ -510,7 +64,7 @@ class EvaluationWithDatasetCreateRequest(TypedDict, total=False):
510
64
 
511
65
  description: str
512
66
 
513
- tasks: Iterable[EvaluationWithDatasetCreateRequestTask]
67
+ tasks: Iterable[EvaluationTaskParam]
514
68
  """Tasks allow you to augment and evaluate your data"""
515
69
 
516
70
 
@@ -526,209 +80,6 @@ class EvaluationWithDatasetCreateRequestDataset(TypedDict, total=False):
526
80
  """
527
81
 
528
82
 
529
- class EvaluationWithDatasetCreateRequestTaskChatCompletionEvaluationTaskRequestConfigurationTyped(
530
- TypedDict, total=False
531
- ):
532
- messages: Required[Union[Iterable[Dict[str, object]], str]]
533
-
534
- model: Required[str]
535
-
536
- audio: Union[Dict[str, object], str]
537
-
538
- frequency_penalty: Union[float, str]
539
-
540
- function_call: Union[Dict[str, object], str]
541
-
542
- functions: Union[Iterable[Dict[str, object]], str]
543
-
544
- logit_bias: Union[Dict[str, int], str]
545
-
546
- logprobs: Union[bool, str]
547
-
548
- max_completion_tokens: Union[int, str]
549
-
550
- max_tokens: Union[int, str]
551
-
552
- metadata: Union[Dict[str, str], str]
553
-
554
- modalities: Union[List[str], str]
555
-
556
- n: Union[int, str]
557
-
558
- parallel_tool_calls: Union[bool, str]
559
-
560
- prediction: Union[Dict[str, object], str]
561
-
562
- presence_penalty: Union[float, str]
563
-
564
- reasoning_effort: str
565
-
566
- response_format: Union[Dict[str, object], str]
567
-
568
- seed: Union[int, str]
569
-
570
- stop: str
571
-
572
- store: Union[bool, str]
573
-
574
- temperature: Union[float, str]
575
-
576
- tool_choice: str
577
-
578
- tools: Union[Iterable[Dict[str, object]], str]
579
-
580
- top_k: Union[int, str]
581
-
582
- top_logprobs: Union[int, str]
583
-
584
- top_p: Union[float, str]
585
-
586
-
587
- EvaluationWithDatasetCreateRequestTaskChatCompletionEvaluationTaskRequestConfiguration: TypeAlias = Union[
588
- EvaluationWithDatasetCreateRequestTaskChatCompletionEvaluationTaskRequestConfigurationTyped, Dict[str, object]
589
- ]
590
-
591
-
592
- class EvaluationWithDatasetCreateRequestTaskChatCompletionEvaluationTaskRequest(TypedDict, total=False):
593
- configuration: Required[EvaluationWithDatasetCreateRequestTaskChatCompletionEvaluationTaskRequestConfiguration]
594
-
595
- alias: str
596
- """Alias to title the results column. Defaults to the `task_type`"""
597
-
598
- task_type: Literal["chat_completion"]
599
-
600
-
601
- class EvaluationWithDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfigurationLaunchInferenceConfiguration(
602
- TypedDict, total=False
603
- ):
604
- num_retries: int
605
-
606
- timeout_seconds: int
607
-
608
-
609
- EvaluationWithDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfiguration: TypeAlias = Union[
610
- EvaluationWithDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfigurationLaunchInferenceConfiguration,
611
- str,
612
- ]
613
-
614
-
615
- class EvaluationWithDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfiguration(TypedDict, total=False):
616
- model: Required[str]
617
-
618
- args: Union[Dict[str, object], str]
619
-
620
- inference_configuration: (
621
- EvaluationWithDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfigurationInferenceConfiguration
622
- )
623
-
624
-
625
- class EvaluationWithDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequest(TypedDict, total=False):
626
- configuration: Required[EvaluationWithDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequestConfiguration]
627
-
628
- alias: str
629
- """Alias to title the results column. Defaults to the `task_type`"""
630
-
631
- task_type: Literal["inference"]
632
-
633
-
634
- class EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationHistoryUnionMember0(
635
- TypedDict, total=False
636
- ):
637
- request: Required[str]
638
- """Request inputs"""
639
-
640
- response: Required[str]
641
- """Response outputs"""
642
-
643
- session_data: Dict[str, object]
644
- """Session data corresponding to the request response pair"""
645
-
646
-
647
- class EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesInitialState(
648
- TypedDict, total=False
649
- ):
650
- current_node: Required[str]
651
-
652
- state: Required[Dict[str, object]]
653
-
654
-
655
- class EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesPartialTrace(
656
- TypedDict, total=False
657
- ):
658
- duration_ms: Required[int]
659
-
660
- node_id: Required[str]
661
-
662
- operation_input: Required[str]
663
-
664
- operation_output: Required[str]
665
-
666
- operation_type: Required[str]
667
-
668
- start_timestamp: Required[str]
669
-
670
- workflow_id: Required[str]
671
-
672
- operation_metadata: Dict[str, object]
673
-
674
-
675
- class EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverrides(
676
- TypedDict, total=False
677
- ):
678
- concurrent: bool
679
-
680
- initial_state: EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesInitialState
681
-
682
- partial_trace: Iterable[
683
- EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesPartialTrace
684
- ]
685
-
686
- use_channels: bool
687
-
688
-
689
- EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverrides: TypeAlias = Union[
690
- EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverrides,
691
- str,
692
- ]
693
-
694
-
695
- class EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfiguration(
696
- TypedDict, total=False
697
- ):
698
- application_variant_id: Required[str]
699
-
700
- inputs: Required[Union[Dict[str, object], str]]
701
-
702
- history: Union[
703
- Iterable[
704
- EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationHistoryUnionMember0
705
- ],
706
- str,
707
- ]
708
-
709
- operation_metadata: Union[Dict[str, object], str]
710
-
711
- overrides: EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfigurationOverrides
712
- """Execution override options for agentic applications"""
713
-
714
-
715
- class EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequest(TypedDict, total=False):
716
- configuration: Required[
717
- EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequestConfiguration
718
- ]
719
-
720
- alias: str
721
- """Alias to title the results column. Defaults to the `task_type`"""
722
-
723
- task_type: Literal["application_variant"]
724
-
725
-
726
- EvaluationWithDatasetCreateRequestTask: TypeAlias = Union[
727
- EvaluationWithDatasetCreateRequestTaskChatCompletionEvaluationTaskRequest,
728
- EvaluationWithDatasetCreateRequestTaskGenericInferenceEvaluationTaskRequest,
729
- EvaluationWithDatasetCreateRequestTaskApplicationVariantV1EvaluationTaskRequest,
730
- ]
731
-
732
83
  EvaluationCreateParams: TypeAlias = Union[
733
84
  EvaluationStandaloneCreateRequest, EvaluationFromDatasetCreateRequest, EvaluationWithDatasetCreateRequest
734
85
  ]
@@ -0,0 +1,212 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Dict, List, Union, Iterable
6
+ from typing_extensions import Literal, Required, TypeAlias, TypedDict
7
+
8
+ __all__ = [
9
+ "EvaluationTaskParam",
10
+ "ChatCompletionEvaluationTaskRequest",
11
+ "ChatCompletionEvaluationTaskRequestConfiguration",
12
+ "GenericInferenceEvaluationTaskRequest",
13
+ "GenericInferenceEvaluationTaskRequestConfiguration",
14
+ "GenericInferenceEvaluationTaskRequestConfigurationInferenceConfiguration",
15
+ "GenericInferenceEvaluationTaskRequestConfigurationInferenceConfigurationLaunchInferenceConfiguration",
16
+ "ApplicationVariantV1EvaluationTaskRequest",
17
+ "ApplicationVariantV1EvaluationTaskRequestConfiguration",
18
+ "ApplicationVariantV1EvaluationTaskRequestConfigurationHistoryUnionMember0",
19
+ "ApplicationVariantV1EvaluationTaskRequestConfigurationOverrides",
20
+ "ApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverrides",
21
+ "ApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesInitialState",
22
+ "ApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesPartialTrace",
23
+ ]
24
+
25
+
26
+ class ChatCompletionEvaluationTaskRequestConfigurationTyped(TypedDict, total=False):
27
+ messages: Required[Union[Iterable[Dict[str, object]], str]]
28
+
29
+ model: Required[str]
30
+
31
+ audio: Union[Dict[str, object], str]
32
+
33
+ frequency_penalty: Union[float, str]
34
+
35
+ function_call: Union[Dict[str, object], str]
36
+
37
+ functions: Union[Iterable[Dict[str, object]], str]
38
+
39
+ logit_bias: Union[Dict[str, int], str]
40
+
41
+ logprobs: Union[bool, str]
42
+
43
+ max_completion_tokens: Union[int, str]
44
+
45
+ max_tokens: Union[int, str]
46
+
47
+ metadata: Union[Dict[str, str], str]
48
+
49
+ modalities: Union[List[str], str]
50
+
51
+ n: Union[int, str]
52
+
53
+ parallel_tool_calls: Union[bool, str]
54
+
55
+ prediction: Union[Dict[str, object], str]
56
+
57
+ presence_penalty: Union[float, str]
58
+
59
+ reasoning_effort: str
60
+
61
+ response_format: Union[Dict[str, object], str]
62
+
63
+ seed: Union[int, str]
64
+
65
+ stop: str
66
+
67
+ store: Union[bool, str]
68
+
69
+ temperature: Union[float, str]
70
+
71
+ tool_choice: str
72
+
73
+ tools: Union[Iterable[Dict[str, object]], str]
74
+
75
+ top_k: Union[int, str]
76
+
77
+ top_logprobs: Union[int, str]
78
+
79
+ top_p: Union[float, str]
80
+
81
+
82
+ ChatCompletionEvaluationTaskRequestConfiguration: TypeAlias = Union[
83
+ ChatCompletionEvaluationTaskRequestConfigurationTyped, Dict[str, object]
84
+ ]
85
+
86
+
87
+ class ChatCompletionEvaluationTaskRequest(TypedDict, total=False):
88
+ configuration: Required[ChatCompletionEvaluationTaskRequestConfiguration]
89
+
90
+ alias: str
91
+ """Alias to title the results column. Defaults to the `task_type`"""
92
+
93
+ task_type: Literal["chat_completion"]
94
+
95
+
96
+ class GenericInferenceEvaluationTaskRequestConfigurationInferenceConfigurationLaunchInferenceConfiguration(
97
+ TypedDict, total=False
98
+ ):
99
+ num_retries: int
100
+
101
+ timeout_seconds: int
102
+
103
+
104
+ GenericInferenceEvaluationTaskRequestConfigurationInferenceConfiguration: TypeAlias = Union[
105
+ GenericInferenceEvaluationTaskRequestConfigurationInferenceConfigurationLaunchInferenceConfiguration, str
106
+ ]
107
+
108
+
109
+ class GenericInferenceEvaluationTaskRequestConfiguration(TypedDict, total=False):
110
+ model: Required[str]
111
+
112
+ args: Union[Dict[str, object], str]
113
+
114
+ inference_configuration: GenericInferenceEvaluationTaskRequestConfigurationInferenceConfiguration
115
+
116
+
117
+ class GenericInferenceEvaluationTaskRequest(TypedDict, total=False):
118
+ configuration: Required[GenericInferenceEvaluationTaskRequestConfiguration]
119
+
120
+ alias: str
121
+ """Alias to title the results column. Defaults to the `task_type`"""
122
+
123
+ task_type: Literal["inference"]
124
+
125
+
126
+ class ApplicationVariantV1EvaluationTaskRequestConfigurationHistoryUnionMember0(TypedDict, total=False):
127
+ request: Required[str]
128
+ """Request inputs"""
129
+
130
+ response: Required[str]
131
+ """Response outputs"""
132
+
133
+ session_data: Dict[str, object]
134
+ """Session data corresponding to the request response pair"""
135
+
136
+
137
+ class ApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesInitialState(
138
+ TypedDict, total=False
139
+ ):
140
+ current_node: Required[str]
141
+
142
+ state: Required[Dict[str, object]]
143
+
144
+
145
+ class ApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesPartialTrace(
146
+ TypedDict, total=False
147
+ ):
148
+ duration_ms: Required[int]
149
+
150
+ node_id: Required[str]
151
+
152
+ operation_input: Required[str]
153
+
154
+ operation_output: Required[str]
155
+
156
+ operation_type: Required[str]
157
+
158
+ start_timestamp: Required[str]
159
+
160
+ workflow_id: Required[str]
161
+
162
+ operation_metadata: Dict[str, object]
163
+
164
+
165
+ class ApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverrides(
166
+ TypedDict, total=False
167
+ ):
168
+ concurrent: bool
169
+
170
+ initial_state: (
171
+ ApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesInitialState
172
+ )
173
+
174
+ partial_trace: Iterable[
175
+ ApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverridesPartialTrace
176
+ ]
177
+
178
+ use_channels: bool
179
+
180
+
181
+ ApplicationVariantV1EvaluationTaskRequestConfigurationOverrides: TypeAlias = Union[
182
+ ApplicationVariantV1EvaluationTaskRequestConfigurationOverridesAgenticApplicationOverrides, str
183
+ ]
184
+
185
+
186
+ class ApplicationVariantV1EvaluationTaskRequestConfiguration(TypedDict, total=False):
187
+ application_variant_id: Required[str]
188
+
189
+ inputs: Required[Union[Dict[str, object], str]]
190
+
191
+ history: Union[Iterable[ApplicationVariantV1EvaluationTaskRequestConfigurationHistoryUnionMember0], str]
192
+
193
+ operation_metadata: Union[Dict[str, object], str]
194
+
195
+ overrides: ApplicationVariantV1EvaluationTaskRequestConfigurationOverrides
196
+ """Execution override options for agentic applications"""
197
+
198
+
199
+ class ApplicationVariantV1EvaluationTaskRequest(TypedDict, total=False):
200
+ configuration: Required[ApplicationVariantV1EvaluationTaskRequestConfiguration]
201
+
202
+ alias: str
203
+ """Alias to title the results column. Defaults to the `task_type`"""
204
+
205
+ task_type: Literal["application_variant"]
206
+
207
+
208
+ EvaluationTaskParam: TypeAlias = Union[
209
+ ChatCompletionEvaluationTaskRequest,
210
+ GenericInferenceEvaluationTaskRequest,
211
+ ApplicationVariantV1EvaluationTaskRequest,
212
+ ]
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: scale-gp-beta
-Version: 0.1.0a6
+Version: 0.1.0a8
 Summary: The official Python library for the Scale GP API
 Project-URL: Homepage, https://github.com/scaleapi/sgp-python-beta
 Project-URL: Repository, https://github.com/scaleapi/sgp-python-beta
@@ -5,13 +5,13 @@ scale_gp_beta/_compat.py,sha256=VWemUKbj6DDkQ-O4baSpHVLJafotzeXmCQGJugfVTIw,6580
5
5
  scale_gp_beta/_constants.py,sha256=S14PFzyN9-I31wiV7SmIlL5Ga0MLHxdvegInGdXH7tM,462
6
6
  scale_gp_beta/_exceptions.py,sha256=95GM5CLFtP-QMjjmzsr5ajjZOyEZvyaETfGmqNPR8YM,3226
7
7
  scale_gp_beta/_files.py,sha256=VHiUi-XDLm5MK8EbVoB2TdgX3jbYshIfxYLeKv5jaYI,3620
8
- scale_gp_beta/_models.py,sha256=CTC-fpbbGneROztxHX-PkLntPt1ZMmwDqoKY9VAIOVg,29071
8
+ scale_gp_beta/_models.py,sha256=Bg-k8-T1kDWURAYXrbDF5FSAyLEy7k90Jrvne-dF4Wc,29070
9
9
  scale_gp_beta/_qs.py,sha256=AOkSz4rHtK4YI3ZU_kzea-zpwBUgEY8WniGmTPyEimc,4846
10
10
  scale_gp_beta/_resource.py,sha256=siZly_U6D0AOVLAzaOsqUdEFFzVMbWRj-ml30nvRp7E,1118
11
11
  scale_gp_beta/_response.py,sha256=ATtij8CjXVjmhdOWozU9Y0SP4Q_uxCYGFUHroxFnSc4,28853
12
12
  scale_gp_beta/_streaming.py,sha256=fcCSGXslmi2SmmkM05g2SACXHk2Mj7k1X5uMBu6U5s8,10112
13
13
  scale_gp_beta/_types.py,sha256=ScQhVBaKbtJrER3NkXbjokWE9DqSqREMIw9LE0NrFfA,6150
14
- scale_gp_beta/_version.py,sha256=tBulsXt5NWpm7sumLsQGrulDcZ8nJyHBl3-3Y5NjBwQ,173
14
+ scale_gp_beta/_version.py,sha256=ckvM_tv85xMh1KIzW_1D1v_kQnwo7N4V73wdvq0MjGU,173
15
15
  scale_gp_beta/pagination.py,sha256=6AAa8_V0wARlMd1MIXijugYbG1mILGc2tHVKbUQbZyQ,2595
16
16
  scale_gp_beta/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
17
17
  scale_gp_beta/_utils/__init__.py,sha256=PNZ_QJuzZEgyYXqkO1HVhGkj5IU9bglVUcw7H-Knjzw,2062
@@ -20,7 +20,7 @@ scale_gp_beta/_utils/_proxy.py,sha256=z3zsateHtb0EARTWKk8QZNHfPkqJbqwd1lM993LBwG
20
20
  scale_gp_beta/_utils/_reflection.py,sha256=ZmGkIgT_PuwedyNBrrKGbxoWtkpytJNU1uU4QHnmEMU,1364
21
21
  scale_gp_beta/_utils/_streams.py,sha256=SMC90diFFecpEg_zgDRVbdR3hSEIgVVij4taD-noMLM,289
22
22
  scale_gp_beta/_utils/_sync.py,sha256=TpGLrrhRNWTJtODNE6Fup3_k7zrWm1j2RlirzBwre-0,2862
23
- scale_gp_beta/_utils/_transform.py,sha256=tsSFOIZ7iczaUsMSGBD_iSFOOdUyT2xtkcq1xyF0L9o,13986
23
+ scale_gp_beta/_utils/_transform.py,sha256=asrbdx4Pf5NupzaB8QdEjypW_DgHjjkpswHT0Jum4S0,13987
24
24
  scale_gp_beta/_utils/_typing.py,sha256=nTJz0jcrQbEgxwy4TtAkNxuU0QHHlmc6mQtA6vIR8tg,4501
25
25
  scale_gp_beta/_utils/_utils.py,sha256=8UmbPOy_AAr2uUjjFui-VZSrVBHRj6bfNEKRp5YZP2A,12004
26
26
  scale_gp_beta/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224
@@ -29,7 +29,7 @@ scale_gp_beta/resources/completions.py,sha256=dk7Uvl8dnnieRWTJr2fhzJMZwOzGIjsYSw
29
29
  scale_gp_beta/resources/dataset_items.py,sha256=BOx6ddLcDvL5L9HweCY62QnxflAs1jQJC-Ni-xzMUf0,22361
30
30
  scale_gp_beta/resources/datasets.py,sha256=tE2JKxlHc48Sm-SpySH4f265CxCBlaHWT68jKiJKJc8,20717
31
31
  scale_gp_beta/resources/evaluation_items.py,sha256=iD-srR9ZQQ3WAutxA98_MMj4_1h-B1_lZ_Ujb9F6Ino,11460
32
- scale_gp_beta/resources/evaluations.py,sha256=61sMyGqrH-gztzhY82UmXyV0Ip-s9LQvBiuFEzQgHsQ,25855
32
+ scale_gp_beta/resources/evaluations.py,sha256=HzTnmgC1l8wAKMEZ5UWgXgBqY_Ax4E-yi9A_r4nlfEA,25677
33
33
  scale_gp_beta/resources/inference.py,sha256=_20eN0x0PZBPNLx2VrozQrJgRVjtlXPjeTpTcnuP0bU,7576
34
34
  scale_gp_beta/resources/models.py,sha256=85F8qPJN9lBPbfNm9F8bHpdJSsyekS9B3GDPJtCXaMA,32658
35
35
  scale_gp_beta/resources/chat/__init__.py,sha256=BVAfz9TM3DT5W9f_mt0P9YRxL_MsUxKCWAH6u1iogmA,1041
@@ -38,7 +38,7 @@ scale_gp_beta/resources/chat/completions.py,sha256=ZPa4-9RNKTXldq2VjWahNrlXeO_jY
38
38
  scale_gp_beta/resources/files/__init__.py,sha256=VgAtqUimN5Kf_-lmEaNBnu_ApGegKsJQ1zNf-42MXFA,1002
39
39
  scale_gp_beta/resources/files/content.py,sha256=oJxb-28ZOUBgzE_MiAaJOcKFmtlB-N5APdhfZBNJna8,5762
40
40
  scale_gp_beta/resources/files/files.py,sha256=M8OdZoIi3fFjJL7oIn8w9TD6TVcASCMy1Ze1YZRbPMo,20530
41
- scale_gp_beta/types/__init__.py,sha256=lmW3hfEjdEqRS2iSpGQ_YC8IYtVnbYMyoHR6zWEgQZE,3114
41
+ scale_gp_beta/types/__init__.py,sha256=x-G9Qo48WT9ct-93waO6DCNtk0jJVRUzKQnjoIAZzto,3192
42
42
  scale_gp_beta/types/completion.py,sha256=5eewo25sdqL4vutqvE8wmugE0Cw6YLzZ0_AD6yjP9NM,3259
43
43
  scale_gp_beta/types/completion_create_params.py,sha256=LE9vna29Kbh7E8qUq7EhQbcu7YuCF_h663maKtzOnhk,3063
44
44
  scale_gp_beta/types/dataset.py,sha256=uTlOPgxkzpiAFOr_NOOYqhTsrqOKkOyXvxzczIe9gaE,511
@@ -56,12 +56,13 @@ scale_gp_beta/types/dataset_retrieve_params.py,sha256=5tpzuzX6y1WKKxP2AbjYwwcATp
56
56
  scale_gp_beta/types/dataset_update_params.py,sha256=Aw7m-jat5P82qJGJgsuz6xx6nu96jYdfJnM_TR94PAE,287
57
57
  scale_gp_beta/types/evaluation.py,sha256=U8m8P0dX290Pv-jG2ZiiV9djQ1dUkyLI1tv0SGgIH8c,609
58
58
  scale_gp_beta/types/evaluation_archive_response.py,sha256=SkGy3GourPaQRPt84smBd1XhwZqeDTDINK7PtyEEpSY,356
59
- scale_gp_beta/types/evaluation_create_params.py,sha256=nmbi633xCnOY28cnvH595s2hlk7xKU0surVyYAbeSlQ,25504
59
+ scale_gp_beta/types/evaluation_create_params.py,sha256=1thnDWVjogRI_CaBQvkyBXdd0o8L1YYw0HXC3IM4Cxg,2431
60
60
  scale_gp_beta/types/evaluation_item.py,sha256=KENQ19JXAEGD0q82HwQ6GDx69giPmzP0h0Uo8ehWF8w,603
61
61
  scale_gp_beta/types/evaluation_item_list_params.py,sha256=LquF3dWIU6b7O_Sy_b0R2FMk5XC-Jm6mEHLJGUOKLuk,435
62
62
  scale_gp_beta/types/evaluation_item_retrieve_params.py,sha256=UYEKIAQ4dy92ZOSV1tWDZcvXG7_0BSpOND5Ehzs7QM4,296
63
63
  scale_gp_beta/types/evaluation_list_params.py,sha256=3e8fR1LCDhODoRrvgriRvgLhpIp0HXEZdjQ6P9p-eD4,393
64
64
  scale_gp_beta/types/evaluation_retrieve_params.py,sha256=tqzzbcD-hEcUcGFrfIhH3fG1cXuLvQGHZlyS-e3Zsx4,288
65
+ scale_gp_beta/types/evaluation_task_param.py,sha256=b8kIoFo0sR88JBm3VzHeNPoyJKygh2g9OzJRbJcKfoU,6276
65
66
  scale_gp_beta/types/file.py,sha256=Xkha0eSr1q6hkwjE9e2XNgk8kuHNoTEe1LXNhz6o-1k,528
66
67
  scale_gp_beta/types/file_create_params.py,sha256=KpXv6JCbd8BlgceTmBTewxOky2JTJaTW3mcGiVVU7wE,317
67
68
  scale_gp_beta/types/file_delete_response.py,sha256=lOsiaw8qrUOnH7smxb27-n7M4D1chfXlAUaMTRmdldY,336
@@ -79,12 +80,12 @@ scale_gp_beta/types/model_delete_response.py,sha256=fSpTChRLHPOoc9SJbkS4wcLxVOc3
79
80
  scale_gp_beta/types/model_list_params.py,sha256=617LRolXLNCV8kadHK7XRGN-0woh0mvj88ZSbPLdGDg,702
80
81
  scale_gp_beta/types/model_update_params.py,sha256=RFXvs-EIDHmNO-fnPB8H6B9DlK6bYVsiwFDMPPFHGII,3701
81
82
  scale_gp_beta/types/chat/__init__.py,sha256=DA0PFPt0oaPb25RI7Cs3RQEJfDLg5-qBiU8l0S_3nnw,443
82
- scale_gp_beta/types/chat/chat_completion.py,sha256=MswoiGtEb_ik1OHkk6k4jrpcH4lqmOdnIXLnGZJy7wM,7314
83
- scale_gp_beta/types/chat/chat_completion_chunk.py,sha256=6anUxR5cLdhEhhSgjh3tFbH_7crpD9dktsAnrBLxykQ,7047
83
+ scale_gp_beta/types/chat/chat_completion.py,sha256=oYOrsTBdGwV9e_mF4F06XmdyZI4pyw06IQ-0mFn0etk,8250
84
+ scale_gp_beta/types/chat/chat_completion_chunk.py,sha256=57-i6LyOk6IX2HZvXsoUC26e0D0dVBNQjX_GbT0eKEw,7060
84
85
  scale_gp_beta/types/chat/completion_create_params.py,sha256=Y7vJNvNM4Sov77l55aS5YtyRnrf7isediu3nKr6YE-A,4505
85
86
  scale_gp_beta/types/chat/completion_create_response.py,sha256=0OhfoJW8azVRrZdXRRMuiJ7kEEeMDnKScxrr3sayzDo,374
86
87
  scale_gp_beta/types/files/__init__.py,sha256=OKfJYcKb4NObdiRObqJV_dOyDQ8feXekDUge2o_4pXQ,122
87
- scale_gp_beta-0.1.0a6.dist-info/METADATA,sha256=XeDXaHh4XorBJB9SDAdZMVRVee4JthB20lmhahQqPm0,16938
88
- scale_gp_beta-0.1.0a6.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
89
- scale_gp_beta-0.1.0a6.dist-info/licenses/LICENSE,sha256=x49Bj8r_ZpqfzThbmfHyZ_bE88XvHdIMI_ANyLHFFRE,11338
90
- scale_gp_beta-0.1.0a6.dist-info/RECORD,,
88
+ scale_gp_beta-0.1.0a8.dist-info/METADATA,sha256=wGSQ9SUjufrly6jFr44YaxFahnUbItFw4uaQeBKTjso,16938
89
+ scale_gp_beta-0.1.0a8.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
90
+ scale_gp_beta-0.1.0a8.dist-info/licenses/LICENSE,sha256=x49Bj8r_ZpqfzThbmfHyZ_bE88XvHdIMI_ANyLHFFRE,11338
91
+ scale_gp_beta-0.1.0a8.dist-info/RECORD,,