rapidata 2.33.2__py3-none-any.whl → 2.34.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of rapidata might be problematic. Click here for more details.
- rapidata/__init__.py +1 -1
- rapidata/api_client/__init__.py +4 -0
- rapidata/api_client/api/__init__.py +1 -0
- rapidata/api_client/api/benchmark_api.py +6 -5
- rapidata/api_client/api/leaderboard_api.py +29 -296
- rapidata/api_client/api/prompt_api.py +320 -0
- rapidata/api_client/api/validation_set_api.py +3 -3
- rapidata/api_client/models/__init__.py +3 -0
- rapidata/api_client/models/conditional_validation_selection.py +4 -2
- rapidata/api_client/models/create_leaderboard_model.py +9 -2
- rapidata/api_client/models/get_standing_by_id_result.py +4 -15
- rapidata/api_client/models/prompt_by_benchmark_result.py +3 -1
- rapidata/api_client/models/standing_by_leaderboard.py +1 -1
- rapidata/api_client/models/standings_by_leaderboard_result.py +95 -0
- rapidata/api_client/models/tags_by_benchmark_result.py +87 -0
- rapidata/api_client/models/update_prompt_tags_model.py +87 -0
- rapidata/api_client_README.md +5 -2
- rapidata/rapidata_client/benchmark/leaderboard/rapidata_leaderboard.py +12 -9
- rapidata/rapidata_client/benchmark/rapidata_benchmark.py +26 -5
- rapidata/rapidata_client/benchmark/rapidata_benchmark_manager.py +16 -8
- {rapidata-2.33.2.dist-info → rapidata-2.34.1.dist-info}/METADATA +1 -1
- {rapidata-2.33.2.dist-info → rapidata-2.34.1.dist-info}/RECORD +24 -20
- {rapidata-2.33.2.dist-info → rapidata-2.34.1.dist-info}/LICENSE +0 -0
- {rapidata-2.33.2.dist-info → rapidata-2.34.1.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,320 @@
|
|
|
1
|
+
# coding: utf-8
|
|
2
|
+
|
|
3
|
+
"""
|
|
4
|
+
Rapidata.Dataset
|
|
5
|
+
|
|
6
|
+
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
|
|
7
|
+
|
|
8
|
+
The version of the OpenAPI document: v1
|
|
9
|
+
Generated by OpenAPI Generator (https://openapi-generator.tech)
|
|
10
|
+
|
|
11
|
+
Do not edit the class manually.
|
|
12
|
+
""" # noqa: E501
|
|
13
|
+
|
|
14
|
+
import warnings
|
|
15
|
+
from pydantic import validate_call, Field, StrictFloat, StrictStr, StrictInt
|
|
16
|
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
|
17
|
+
from typing_extensions import Annotated
|
|
18
|
+
|
|
19
|
+
from pydantic import StrictStr
|
|
20
|
+
from typing import Optional
|
|
21
|
+
from rapidata.api_client.models.update_prompt_tags_model import UpdatePromptTagsModel
|
|
22
|
+
|
|
23
|
+
from rapidata.api_client.api_client import ApiClient, RequestSerialized
|
|
24
|
+
from rapidata.api_client.api_response import ApiResponse
|
|
25
|
+
from rapidata.api_client.rest import RESTResponseType
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class PromptApi:
|
|
29
|
+
"""NOTE: This class is auto generated by OpenAPI Generator
|
|
30
|
+
Ref: https://openapi-generator.tech
|
|
31
|
+
|
|
32
|
+
Do not edit the class manually.
|
|
33
|
+
"""
|
|
34
|
+
|
|
35
|
+
def __init__(self, api_client=None) -> None:
|
|
36
|
+
if api_client is None:
|
|
37
|
+
api_client = ApiClient.get_default()
|
|
38
|
+
self.api_client = api_client
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
@validate_call
|
|
42
|
+
def benchmark_prompt_prompt_id_tags_put(
|
|
43
|
+
self,
|
|
44
|
+
prompt_id: StrictStr,
|
|
45
|
+
update_prompt_tags_model: Optional[UpdatePromptTagsModel] = None,
|
|
46
|
+
_request_timeout: Union[
|
|
47
|
+
None,
|
|
48
|
+
Annotated[StrictFloat, Field(gt=0)],
|
|
49
|
+
Tuple[
|
|
50
|
+
Annotated[StrictFloat, Field(gt=0)],
|
|
51
|
+
Annotated[StrictFloat, Field(gt=0)]
|
|
52
|
+
]
|
|
53
|
+
] = None,
|
|
54
|
+
_request_auth: Optional[Dict[StrictStr, Any]] = None,
|
|
55
|
+
_content_type: Optional[StrictStr] = None,
|
|
56
|
+
_headers: Optional[Dict[StrictStr, Any]] = None,
|
|
57
|
+
_host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0,
|
|
58
|
+
) -> None:
|
|
59
|
+
"""Updates the tags associated with a prompt.
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
:param prompt_id: (required)
|
|
63
|
+
:type prompt_id: str
|
|
64
|
+
:param update_prompt_tags_model:
|
|
65
|
+
:type update_prompt_tags_model: UpdatePromptTagsModel
|
|
66
|
+
:param _request_timeout: timeout setting for this request. If one
|
|
67
|
+
number provided, it will be total request
|
|
68
|
+
timeout. It can also be a pair (tuple) of
|
|
69
|
+
(connection, read) timeouts.
|
|
70
|
+
:type _request_timeout: int, tuple(int, int), optional
|
|
71
|
+
:param _request_auth: set to override the auth_settings for an a single
|
|
72
|
+
request; this effectively ignores the
|
|
73
|
+
authentication in the spec for a single request.
|
|
74
|
+
:type _request_auth: dict, optional
|
|
75
|
+
:param _content_type: force content-type for the request.
|
|
76
|
+
:type _content_type: str, Optional
|
|
77
|
+
:param _headers: set to override the headers for a single
|
|
78
|
+
request; this effectively ignores the headers
|
|
79
|
+
in the spec for a single request.
|
|
80
|
+
:type _headers: dict, optional
|
|
81
|
+
:param _host_index: set to override the host_index for a single
|
|
82
|
+
request; this effectively ignores the host_index
|
|
83
|
+
in the spec for a single request.
|
|
84
|
+
:type _host_index: int, optional
|
|
85
|
+
:return: Returns the result object.
|
|
86
|
+
""" # noqa: E501
|
|
87
|
+
|
|
88
|
+
_param = self._benchmark_prompt_prompt_id_tags_put_serialize(
|
|
89
|
+
prompt_id=prompt_id,
|
|
90
|
+
update_prompt_tags_model=update_prompt_tags_model,
|
|
91
|
+
_request_auth=_request_auth,
|
|
92
|
+
_content_type=_content_type,
|
|
93
|
+
_headers=_headers,
|
|
94
|
+
_host_index=_host_index
|
|
95
|
+
)
|
|
96
|
+
|
|
97
|
+
_response_types_map: Dict[str, Optional[str]] = {
|
|
98
|
+
'204': None,
|
|
99
|
+
}
|
|
100
|
+
response_data = self.api_client.call_api(
|
|
101
|
+
*_param,
|
|
102
|
+
_request_timeout=_request_timeout
|
|
103
|
+
)
|
|
104
|
+
response_data.read()
|
|
105
|
+
return self.api_client.response_deserialize(
|
|
106
|
+
response_data=response_data,
|
|
107
|
+
response_types_map=_response_types_map,
|
|
108
|
+
).data
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
@validate_call
|
|
112
|
+
def benchmark_prompt_prompt_id_tags_put_with_http_info(
|
|
113
|
+
self,
|
|
114
|
+
prompt_id: StrictStr,
|
|
115
|
+
update_prompt_tags_model: Optional[UpdatePromptTagsModel] = None,
|
|
116
|
+
_request_timeout: Union[
|
|
117
|
+
None,
|
|
118
|
+
Annotated[StrictFloat, Field(gt=0)],
|
|
119
|
+
Tuple[
|
|
120
|
+
Annotated[StrictFloat, Field(gt=0)],
|
|
121
|
+
Annotated[StrictFloat, Field(gt=0)]
|
|
122
|
+
]
|
|
123
|
+
] = None,
|
|
124
|
+
_request_auth: Optional[Dict[StrictStr, Any]] = None,
|
|
125
|
+
_content_type: Optional[StrictStr] = None,
|
|
126
|
+
_headers: Optional[Dict[StrictStr, Any]] = None,
|
|
127
|
+
_host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0,
|
|
128
|
+
) -> ApiResponse[None]:
|
|
129
|
+
"""Updates the tags associated with a prompt.
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
:param prompt_id: (required)
|
|
133
|
+
:type prompt_id: str
|
|
134
|
+
:param update_prompt_tags_model:
|
|
135
|
+
:type update_prompt_tags_model: UpdatePromptTagsModel
|
|
136
|
+
:param _request_timeout: timeout setting for this request. If one
|
|
137
|
+
number provided, it will be total request
|
|
138
|
+
timeout. It can also be a pair (tuple) of
|
|
139
|
+
(connection, read) timeouts.
|
|
140
|
+
:type _request_timeout: int, tuple(int, int), optional
|
|
141
|
+
:param _request_auth: set to override the auth_settings for an a single
|
|
142
|
+
request; this effectively ignores the
|
|
143
|
+
authentication in the spec for a single request.
|
|
144
|
+
:type _request_auth: dict, optional
|
|
145
|
+
:param _content_type: force content-type for the request.
|
|
146
|
+
:type _content_type: str, Optional
|
|
147
|
+
:param _headers: set to override the headers for a single
|
|
148
|
+
request; this effectively ignores the headers
|
|
149
|
+
in the spec for a single request.
|
|
150
|
+
:type _headers: dict, optional
|
|
151
|
+
:param _host_index: set to override the host_index for a single
|
|
152
|
+
request; this effectively ignores the host_index
|
|
153
|
+
in the spec for a single request.
|
|
154
|
+
:type _host_index: int, optional
|
|
155
|
+
:return: Returns the result object.
|
|
156
|
+
""" # noqa: E501
|
|
157
|
+
|
|
158
|
+
_param = self._benchmark_prompt_prompt_id_tags_put_serialize(
|
|
159
|
+
prompt_id=prompt_id,
|
|
160
|
+
update_prompt_tags_model=update_prompt_tags_model,
|
|
161
|
+
_request_auth=_request_auth,
|
|
162
|
+
_content_type=_content_type,
|
|
163
|
+
_headers=_headers,
|
|
164
|
+
_host_index=_host_index
|
|
165
|
+
)
|
|
166
|
+
|
|
167
|
+
_response_types_map: Dict[str, Optional[str]] = {
|
|
168
|
+
'204': None,
|
|
169
|
+
}
|
|
170
|
+
response_data = self.api_client.call_api(
|
|
171
|
+
*_param,
|
|
172
|
+
_request_timeout=_request_timeout
|
|
173
|
+
)
|
|
174
|
+
response_data.read()
|
|
175
|
+
return self.api_client.response_deserialize(
|
|
176
|
+
response_data=response_data,
|
|
177
|
+
response_types_map=_response_types_map,
|
|
178
|
+
)
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
@validate_call
|
|
182
|
+
def benchmark_prompt_prompt_id_tags_put_without_preload_content(
|
|
183
|
+
self,
|
|
184
|
+
prompt_id: StrictStr,
|
|
185
|
+
update_prompt_tags_model: Optional[UpdatePromptTagsModel] = None,
|
|
186
|
+
_request_timeout: Union[
|
|
187
|
+
None,
|
|
188
|
+
Annotated[StrictFloat, Field(gt=0)],
|
|
189
|
+
Tuple[
|
|
190
|
+
Annotated[StrictFloat, Field(gt=0)],
|
|
191
|
+
Annotated[StrictFloat, Field(gt=0)]
|
|
192
|
+
]
|
|
193
|
+
] = None,
|
|
194
|
+
_request_auth: Optional[Dict[StrictStr, Any]] = None,
|
|
195
|
+
_content_type: Optional[StrictStr] = None,
|
|
196
|
+
_headers: Optional[Dict[StrictStr, Any]] = None,
|
|
197
|
+
_host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0,
|
|
198
|
+
) -> RESTResponseType:
|
|
199
|
+
"""Updates the tags associated with a prompt.
|
|
200
|
+
|
|
201
|
+
|
|
202
|
+
:param prompt_id: (required)
|
|
203
|
+
:type prompt_id: str
|
|
204
|
+
:param update_prompt_tags_model:
|
|
205
|
+
:type update_prompt_tags_model: UpdatePromptTagsModel
|
|
206
|
+
:param _request_timeout: timeout setting for this request. If one
|
|
207
|
+
number provided, it will be total request
|
|
208
|
+
timeout. It can also be a pair (tuple) of
|
|
209
|
+
(connection, read) timeouts.
|
|
210
|
+
:type _request_timeout: int, tuple(int, int), optional
|
|
211
|
+
:param _request_auth: set to override the auth_settings for an a single
|
|
212
|
+
request; this effectively ignores the
|
|
213
|
+
authentication in the spec for a single request.
|
|
214
|
+
:type _request_auth: dict, optional
|
|
215
|
+
:param _content_type: force content-type for the request.
|
|
216
|
+
:type _content_type: str, Optional
|
|
217
|
+
:param _headers: set to override the headers for a single
|
|
218
|
+
request; this effectively ignores the headers
|
|
219
|
+
in the spec for a single request.
|
|
220
|
+
:type _headers: dict, optional
|
|
221
|
+
:param _host_index: set to override the host_index for a single
|
|
222
|
+
request; this effectively ignores the host_index
|
|
223
|
+
in the spec for a single request.
|
|
224
|
+
:type _host_index: int, optional
|
|
225
|
+
:return: Returns the result object.
|
|
226
|
+
""" # noqa: E501
|
|
227
|
+
|
|
228
|
+
_param = self._benchmark_prompt_prompt_id_tags_put_serialize(
|
|
229
|
+
prompt_id=prompt_id,
|
|
230
|
+
update_prompt_tags_model=update_prompt_tags_model,
|
|
231
|
+
_request_auth=_request_auth,
|
|
232
|
+
_content_type=_content_type,
|
|
233
|
+
_headers=_headers,
|
|
234
|
+
_host_index=_host_index
|
|
235
|
+
)
|
|
236
|
+
|
|
237
|
+
_response_types_map: Dict[str, Optional[str]] = {
|
|
238
|
+
'204': None,
|
|
239
|
+
}
|
|
240
|
+
response_data = self.api_client.call_api(
|
|
241
|
+
*_param,
|
|
242
|
+
_request_timeout=_request_timeout
|
|
243
|
+
)
|
|
244
|
+
return response_data.response
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
def _benchmark_prompt_prompt_id_tags_put_serialize(
|
|
248
|
+
self,
|
|
249
|
+
prompt_id,
|
|
250
|
+
update_prompt_tags_model,
|
|
251
|
+
_request_auth,
|
|
252
|
+
_content_type,
|
|
253
|
+
_headers,
|
|
254
|
+
_host_index,
|
|
255
|
+
) -> RequestSerialized:
|
|
256
|
+
|
|
257
|
+
_host = None
|
|
258
|
+
|
|
259
|
+
_collection_formats: Dict[str, str] = {
|
|
260
|
+
}
|
|
261
|
+
|
|
262
|
+
_path_params: Dict[str, str] = {}
|
|
263
|
+
_query_params: List[Tuple[str, str]] = []
|
|
264
|
+
_header_params: Dict[str, Optional[str]] = _headers or {}
|
|
265
|
+
_form_params: List[Tuple[str, str]] = []
|
|
266
|
+
_files: Dict[
|
|
267
|
+
str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]]
|
|
268
|
+
] = {}
|
|
269
|
+
_body_params: Optional[bytes] = None
|
|
270
|
+
|
|
271
|
+
# process the path parameters
|
|
272
|
+
if prompt_id is not None:
|
|
273
|
+
_path_params['promptId'] = prompt_id
|
|
274
|
+
# process the query parameters
|
|
275
|
+
# process the header parameters
|
|
276
|
+
# process the form parameters
|
|
277
|
+
# process the body parameter
|
|
278
|
+
if update_prompt_tags_model is not None:
|
|
279
|
+
_body_params = update_prompt_tags_model
|
|
280
|
+
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
# set the HTTP header `Content-Type`
|
|
284
|
+
if _content_type:
|
|
285
|
+
_header_params['Content-Type'] = _content_type
|
|
286
|
+
else:
|
|
287
|
+
_default_content_type = (
|
|
288
|
+
self.api_client.select_header_content_type(
|
|
289
|
+
[
|
|
290
|
+
'application/json',
|
|
291
|
+
'text/json',
|
|
292
|
+
'application/*+json'
|
|
293
|
+
]
|
|
294
|
+
)
|
|
295
|
+
)
|
|
296
|
+
if _default_content_type is not None:
|
|
297
|
+
_header_params['Content-Type'] = _default_content_type
|
|
298
|
+
|
|
299
|
+
# authentication setting
|
|
300
|
+
_auth_settings: List[str] = [
|
|
301
|
+
'bearer',
|
|
302
|
+
'oauth2'
|
|
303
|
+
]
|
|
304
|
+
|
|
305
|
+
return self.api_client.param_serialize(
|
|
306
|
+
method='PUT',
|
|
307
|
+
resource_path='/benchmark-prompt/{promptId}/tags',
|
|
308
|
+
path_params=_path_params,
|
|
309
|
+
query_params=_query_params,
|
|
310
|
+
header_params=_header_params,
|
|
311
|
+
body=_body_params,
|
|
312
|
+
post_params=_form_params,
|
|
313
|
+
files=_files,
|
|
314
|
+
auth_settings=_auth_settings,
|
|
315
|
+
collection_formats=_collection_formats,
|
|
316
|
+
_host=_host,
|
|
317
|
+
_request_auth=_request_auth
|
|
318
|
+
)
|
|
319
|
+
|
|
320
|
+
|
|
@@ -3186,7 +3186,7 @@ class ValidationSetApi:
|
|
|
3186
3186
|
_headers: Optional[Dict[StrictStr, Any]] = None,
|
|
3187
3187
|
_host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0,
|
|
3188
3188
|
) -> None:
|
|
3189
|
-
"""(Deprecated) Updates the
|
|
3189
|
+
"""(Deprecated) Updates the shouldAlert property of all rapids within a validation set.
|
|
3190
3190
|
|
|
3191
3191
|
|
|
3192
3192
|
:param validation_set_id: The Id of the validation set to update the dimensions for. (required)
|
|
@@ -3257,7 +3257,7 @@ class ValidationSetApi:
|
|
|
3257
3257
|
_headers: Optional[Dict[StrictStr, Any]] = None,
|
|
3258
3258
|
_host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0,
|
|
3259
3259
|
) -> ApiResponse[None]:
|
|
3260
|
-
"""(Deprecated) Updates the
|
|
3260
|
+
"""(Deprecated) Updates the shouldAlert property of all rapids within a validation set.
|
|
3261
3261
|
|
|
3262
3262
|
|
|
3263
3263
|
:param validation_set_id: The Id of the validation set to update the dimensions for. (required)
|
|
@@ -3328,7 +3328,7 @@ class ValidationSetApi:
|
|
|
3328
3328
|
_headers: Optional[Dict[StrictStr, Any]] = None,
|
|
3329
3329
|
_host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0,
|
|
3330
3330
|
) -> RESTResponseType:
|
|
3331
|
-
"""(Deprecated) Updates the
|
|
3331
|
+
"""(Deprecated) Updates the shouldAlert property of all rapids within a validation set.
|
|
3332
3332
|
|
|
3333
3333
|
|
|
3334
3334
|
:param validation_set_id: The Id of the validation set to update the dimensions for. (required)
|
|
@@ -331,6 +331,7 @@ from rapidata.api_client.models.source_url_metadata_model import SourceUrlMetada
|
|
|
331
331
|
from rapidata.api_client.models.standing_by_leaderboard import StandingByLeaderboard
|
|
332
332
|
from rapidata.api_client.models.standing_by_leaderboard_paged_result import StandingByLeaderboardPagedResult
|
|
333
333
|
from rapidata.api_client.models.standing_status import StandingStatus
|
|
334
|
+
from rapidata.api_client.models.standings_by_leaderboard_result import StandingsByLeaderboardResult
|
|
334
335
|
from rapidata.api_client.models.static_selection import StaticSelection
|
|
335
336
|
from rapidata.api_client.models.sticky_state import StickyState
|
|
336
337
|
from rapidata.api_client.models.stream_file_wrapper import StreamFileWrapper
|
|
@@ -342,6 +343,7 @@ from rapidata.api_client.models.submit_order_model import SubmitOrderModel
|
|
|
342
343
|
from rapidata.api_client.models.submit_participant_result import SubmitParticipantResult
|
|
343
344
|
from rapidata.api_client.models.submit_prompt_model import SubmitPromptModel
|
|
344
345
|
from rapidata.api_client.models.submit_prompt_model_prompt_asset import SubmitPromptModelPromptAsset
|
|
346
|
+
from rapidata.api_client.models.tags_by_benchmark_result import TagsByBenchmarkResult
|
|
345
347
|
from rapidata.api_client.models.text_asset import TextAsset
|
|
346
348
|
from rapidata.api_client.models.text_asset_input import TextAssetInput
|
|
347
349
|
from rapidata.api_client.models.text_asset_model import TextAssetModel
|
|
@@ -365,6 +367,7 @@ from rapidata.api_client.models.update_dimensions_model import UpdateDimensionsM
|
|
|
365
367
|
from rapidata.api_client.models.update_leaderboard_name_model import UpdateLeaderboardNameModel
|
|
366
368
|
from rapidata.api_client.models.update_order_name_model import UpdateOrderNameModel
|
|
367
369
|
from rapidata.api_client.models.update_participant_name_model import UpdateParticipantNameModel
|
|
370
|
+
from rapidata.api_client.models.update_prompt_tags_model import UpdatePromptTagsModel
|
|
368
371
|
from rapidata.api_client.models.update_should_alert_model import UpdateShouldAlertModel
|
|
369
372
|
from rapidata.api_client.models.update_validation_rapid_model import UpdateValidationRapidModel
|
|
370
373
|
from rapidata.api_client.models.update_validation_rapid_model_truth import UpdateValidationRapidModelTruth
|
|
@@ -31,7 +31,8 @@ class ConditionalValidationSelection(BaseModel):
|
|
|
31
31
|
validation_set_id: StrictStr = Field(alias="validationSetId")
|
|
32
32
|
validation_chances: List[ValidationChance] = Field(alias="validationChances")
|
|
33
33
|
dimension: Optional[StrictStr] = None
|
|
34
|
-
|
|
34
|
+
dimensions: Optional[List[StrictStr]] = None
|
|
35
|
+
__properties: ClassVar[List[str]] = ["_t", "validationSetId", "validationChances", "dimension", "dimensions"]
|
|
35
36
|
|
|
36
37
|
@field_validator('t')
|
|
37
38
|
def t_validate_enum(cls, value):
|
|
@@ -106,7 +107,8 @@ class ConditionalValidationSelection(BaseModel):
|
|
|
106
107
|
"_t": obj.get("_t") if obj.get("_t") is not None else 'ConditionalValidationSelection',
|
|
107
108
|
"validationSetId": obj.get("validationSetId"),
|
|
108
109
|
"validationChances": [ValidationChance.from_dict(_item) for _item in obj["validationChances"]] if obj.get("validationChances") is not None else None,
|
|
109
|
-
"dimension": obj.get("dimension")
|
|
110
|
+
"dimension": obj.get("dimension"),
|
|
111
|
+
"dimensions": obj.get("dimensions")
|
|
110
112
|
})
|
|
111
113
|
return _obj
|
|
112
114
|
|
|
@@ -35,7 +35,8 @@ class CreateLeaderboardModel(BaseModel):
|
|
|
35
35
|
response_budget: Optional[StrictInt] = Field(default=None, description="Total amount of responses that get collected per run", alias="responseBudget")
|
|
36
36
|
min_responses: Optional[StrictInt] = Field(default=None, description="The minimum amount of responses that need to be collected per comparison.", alias="minResponses")
|
|
37
37
|
is_inversed: Optional[StrictBool] = Field(default=None, description="If the results should be inversed, meaning people should select the worse model.", alias="isInversed")
|
|
38
|
-
|
|
38
|
+
validation_set_id: Optional[StrictStr] = Field(default=None, description="The Validation set that should be attached to every run.", alias="validationSetId")
|
|
39
|
+
__properties: ClassVar[List[str]] = ["benchmarkId", "benchmarkName", "name", "instruction", "showPrompt", "showPromptAsset", "responseBudget", "minResponses", "isInversed", "validationSetId"]
|
|
39
40
|
|
|
40
41
|
model_config = ConfigDict(
|
|
41
42
|
populate_by_name=True,
|
|
@@ -86,6 +87,11 @@ class CreateLeaderboardModel(BaseModel):
|
|
|
86
87
|
if self.benchmark_name is None and "benchmark_name" in self.model_fields_set:
|
|
87
88
|
_dict['benchmarkName'] = None
|
|
88
89
|
|
|
90
|
+
# set to None if validation_set_id (nullable) is None
|
|
91
|
+
# and model_fields_set contains the field
|
|
92
|
+
if self.validation_set_id is None and "validation_set_id" in self.model_fields_set:
|
|
93
|
+
_dict['validationSetId'] = None
|
|
94
|
+
|
|
89
95
|
return _dict
|
|
90
96
|
|
|
91
97
|
@classmethod
|
|
@@ -106,7 +112,8 @@ class CreateLeaderboardModel(BaseModel):
|
|
|
106
112
|
"showPromptAsset": obj.get("showPromptAsset"),
|
|
107
113
|
"responseBudget": obj.get("responseBudget"),
|
|
108
114
|
"minResponses": obj.get("minResponses"),
|
|
109
|
-
"isInversed": obj.get("isInversed")
|
|
115
|
+
"isInversed": obj.get("isInversed"),
|
|
116
|
+
"validationSetId": obj.get("validationSetId")
|
|
110
117
|
})
|
|
111
118
|
return _obj
|
|
112
119
|
|
|
@@ -17,8 +17,8 @@ import pprint
|
|
|
17
17
|
import re # noqa: F401
|
|
18
18
|
import json
|
|
19
19
|
|
|
20
|
-
from pydantic import BaseModel, ConfigDict, Field, StrictBool,
|
|
21
|
-
from typing import Any, ClassVar, Dict, List
|
|
20
|
+
from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr, field_validator
|
|
21
|
+
from typing import Any, ClassVar, Dict, List
|
|
22
22
|
from typing import Optional, Set
|
|
23
23
|
from typing_extensions import Self
|
|
24
24
|
|
|
@@ -32,10 +32,7 @@ class GetStandingByIdResult(BaseModel):
|
|
|
32
32
|
dataset_id: StrictStr = Field(alias="datasetId")
|
|
33
33
|
status: StrictStr
|
|
34
34
|
is_disabled: StrictBool = Field(alias="isDisabled")
|
|
35
|
-
|
|
36
|
-
wins: StrictInt
|
|
37
|
-
total_matches: StrictInt = Field(alias="totalMatches")
|
|
38
|
-
__properties: ClassVar[List[str]] = ["id", "name", "benchmarkId", "datasetId", "status", "isDisabled", "score", "wins", "totalMatches"]
|
|
35
|
+
__properties: ClassVar[List[str]] = ["id", "name", "benchmarkId", "datasetId", "status", "isDisabled"]
|
|
39
36
|
|
|
40
37
|
@field_validator('status')
|
|
41
38
|
def status_validate_enum(cls, value):
|
|
@@ -83,11 +80,6 @@ class GetStandingByIdResult(BaseModel):
|
|
|
83
80
|
exclude=excluded_fields,
|
|
84
81
|
exclude_none=True,
|
|
85
82
|
)
|
|
86
|
-
# set to None if score (nullable) is None
|
|
87
|
-
# and model_fields_set contains the field
|
|
88
|
-
if self.score is None and "score" in self.model_fields_set:
|
|
89
|
-
_dict['score'] = None
|
|
90
|
-
|
|
91
83
|
return _dict
|
|
92
84
|
|
|
93
85
|
@classmethod
|
|
@@ -105,10 +97,7 @@ class GetStandingByIdResult(BaseModel):
|
|
|
105
97
|
"benchmarkId": obj.get("benchmarkId"),
|
|
106
98
|
"datasetId": obj.get("datasetId"),
|
|
107
99
|
"status": obj.get("status"),
|
|
108
|
-
"isDisabled": obj.get("isDisabled")
|
|
109
|
-
"score": obj.get("score"),
|
|
110
|
-
"wins": obj.get("wins"),
|
|
111
|
-
"totalMatches": obj.get("totalMatches")
|
|
100
|
+
"isDisabled": obj.get("isDisabled")
|
|
112
101
|
})
|
|
113
102
|
return _obj
|
|
114
103
|
|
|
@@ -28,12 +28,13 @@ class PromptByBenchmarkResult(BaseModel):
|
|
|
28
28
|
"""
|
|
29
29
|
PromptByBenchmarkResult
|
|
30
30
|
""" # noqa: E501
|
|
31
|
+
id: StrictStr
|
|
31
32
|
prompt: Optional[StrictStr] = None
|
|
32
33
|
prompt_asset: Optional[GetValidationRapidsResultAsset] = Field(default=None, alias="promptAsset")
|
|
33
34
|
identifier: StrictStr
|
|
34
35
|
created_at: datetime = Field(alias="createdAt")
|
|
35
36
|
tags: List[StrictStr]
|
|
36
|
-
__properties: ClassVar[List[str]] = ["prompt", "promptAsset", "identifier", "createdAt", "tags"]
|
|
37
|
+
__properties: ClassVar[List[str]] = ["id", "prompt", "promptAsset", "identifier", "createdAt", "tags"]
|
|
37
38
|
|
|
38
39
|
model_config = ConfigDict(
|
|
39
40
|
populate_by_name=True,
|
|
@@ -99,6 +100,7 @@ class PromptByBenchmarkResult(BaseModel):
|
|
|
99
100
|
return cls.model_validate(obj)
|
|
100
101
|
|
|
101
102
|
_obj = cls.model_validate({
|
|
103
|
+
"id": obj.get("id"),
|
|
102
104
|
"prompt": obj.get("prompt"),
|
|
103
105
|
"promptAsset": GetValidationRapidsResultAsset.from_dict(obj["promptAsset"]) if obj.get("promptAsset") is not None else None,
|
|
104
106
|
"identifier": obj.get("identifier"),
|
|
@@ -29,7 +29,7 @@ class StandingByLeaderboard(BaseModel):
|
|
|
29
29
|
id: StrictStr
|
|
30
30
|
name: StrictStr
|
|
31
31
|
leaderboard_id: StrictStr = Field(alias="leaderboardId")
|
|
32
|
-
dataset_id: StrictStr = Field(alias="datasetId")
|
|
32
|
+
dataset_id: Optional[StrictStr] = Field(default=None, alias="datasetId")
|
|
33
33
|
status: StrictStr
|
|
34
34
|
score: Optional[Union[StrictFloat, StrictInt]] = None
|
|
35
35
|
wins: StrictInt
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
# coding: utf-8
|
|
2
|
+
|
|
3
|
+
"""
|
|
4
|
+
Rapidata.Dataset
|
|
5
|
+
|
|
6
|
+
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
|
|
7
|
+
|
|
8
|
+
The version of the OpenAPI document: v1
|
|
9
|
+
Generated by OpenAPI Generator (https://openapi-generator.tech)
|
|
10
|
+
|
|
11
|
+
Do not edit the class manually.
|
|
12
|
+
""" # noqa: E501
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
import pprint
|
|
17
|
+
import re # noqa: F401
|
|
18
|
+
import json
|
|
19
|
+
|
|
20
|
+
from pydantic import BaseModel, ConfigDict
|
|
21
|
+
from typing import Any, ClassVar, Dict, List
|
|
22
|
+
from rapidata.api_client.models.standing_by_leaderboard import StandingByLeaderboard
|
|
23
|
+
from typing import Optional, Set
|
|
24
|
+
from typing_extensions import Self
|
|
25
|
+
|
|
26
|
+
class StandingsByLeaderboardResult(BaseModel):
|
|
27
|
+
"""
|
|
28
|
+
StandingsByLeaderboardResult
|
|
29
|
+
""" # noqa: E501
|
|
30
|
+
items: List[StandingByLeaderboard]
|
|
31
|
+
__properties: ClassVar[List[str]] = ["items"]
|
|
32
|
+
|
|
33
|
+
model_config = ConfigDict(
|
|
34
|
+
populate_by_name=True,
|
|
35
|
+
validate_assignment=True,
|
|
36
|
+
protected_namespaces=(),
|
|
37
|
+
)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def to_str(self) -> str:
|
|
41
|
+
"""Returns the string representation of the model using alias"""
|
|
42
|
+
return pprint.pformat(self.model_dump(by_alias=True))
|
|
43
|
+
|
|
44
|
+
def to_json(self) -> str:
|
|
45
|
+
"""Returns the JSON representation of the model using alias"""
|
|
46
|
+
# TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
|
|
47
|
+
return json.dumps(self.to_dict())
|
|
48
|
+
|
|
49
|
+
@classmethod
|
|
50
|
+
def from_json(cls, json_str: str) -> Optional[Self]:
|
|
51
|
+
"""Create an instance of StandingsByLeaderboardResult from a JSON string"""
|
|
52
|
+
return cls.from_dict(json.loads(json_str))
|
|
53
|
+
|
|
54
|
+
def to_dict(self) -> Dict[str, Any]:
|
|
55
|
+
"""Return the dictionary representation of the model using alias.
|
|
56
|
+
|
|
57
|
+
This has the following differences from calling pydantic's
|
|
58
|
+
`self.model_dump(by_alias=True)`:
|
|
59
|
+
|
|
60
|
+
* `None` is only added to the output dict for nullable fields that
|
|
61
|
+
were set at model initialization. Other fields with value `None`
|
|
62
|
+
are ignored.
|
|
63
|
+
"""
|
|
64
|
+
excluded_fields: Set[str] = set([
|
|
65
|
+
])
|
|
66
|
+
|
|
67
|
+
_dict = self.model_dump(
|
|
68
|
+
by_alias=True,
|
|
69
|
+
exclude=excluded_fields,
|
|
70
|
+
exclude_none=True,
|
|
71
|
+
)
|
|
72
|
+
# override the default output from pydantic by calling `to_dict()` of each item in items (list)
|
|
73
|
+
_items = []
|
|
74
|
+
if self.items:
|
|
75
|
+
for _item_items in self.items:
|
|
76
|
+
if _item_items:
|
|
77
|
+
_items.append(_item_items.to_dict())
|
|
78
|
+
_dict['items'] = _items
|
|
79
|
+
return _dict
|
|
80
|
+
|
|
81
|
+
@classmethod
|
|
82
|
+
def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
|
|
83
|
+
"""Create an instance of StandingsByLeaderboardResult from a dict"""
|
|
84
|
+
if obj is None:
|
|
85
|
+
return None
|
|
86
|
+
|
|
87
|
+
if not isinstance(obj, dict):
|
|
88
|
+
return cls.model_validate(obj)
|
|
89
|
+
|
|
90
|
+
_obj = cls.model_validate({
|
|
91
|
+
"items": [StandingByLeaderboard.from_dict(_item) for _item in obj["items"]] if obj.get("items") is not None else None
|
|
92
|
+
})
|
|
93
|
+
return _obj
|
|
94
|
+
|
|
95
|
+
|