rapidata 2.34.2__py3-none-any.whl → 2.35.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of rapidata might be problematic. Click here for more details.

rapidata/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
- __version__ = "2.34.2"
1
+ __version__ = "2.35.1"
2
2
 
3
3
  from .rapidata_client import (
4
4
  RapidataClient,
@@ -404,6 +404,7 @@ from rapidata.api_client.models.update_campaign_model import UpdateCampaignModel
404
404
  from rapidata.api_client.models.update_dataset_name_model import UpdateDatasetNameModel
405
405
  from rapidata.api_client.models.update_dimensions_model import UpdateDimensionsModel
406
406
  from rapidata.api_client.models.update_leaderboard_name_model import UpdateLeaderboardNameModel
407
+ from rapidata.api_client.models.update_leaderboard_response_config_model import UpdateLeaderboardResponseConfigModel
407
408
  from rapidata.api_client.models.update_order_name_model import UpdateOrderNameModel
408
409
  from rapidata.api_client.models.update_participant_name_model import UpdateParticipantNameModel
409
410
  from rapidata.api_client.models.update_prompt_tags_model import UpdatePromptTagsModel
@@ -34,6 +34,7 @@ from rapidata.api_client.models.standing_by_leaderboard_paged_result import Stan
34
34
  from rapidata.api_client.models.standings_by_leaderboard_result import StandingsByLeaderboardResult
35
35
  from rapidata.api_client.models.submit_participant_result import SubmitParticipantResult
36
36
  from rapidata.api_client.models.update_leaderboard_name_model import UpdateLeaderboardNameModel
37
+ from rapidata.api_client.models.update_leaderboard_response_config_model import UpdateLeaderboardResponseConfigModel
37
38
 
38
39
  from rapidata.api_client.api_client import ApiClient, RequestSerialized
39
40
  from rapidata.api_client.api_response import ApiResponse
@@ -3098,6 +3099,288 @@ class LeaderboardApi:
3098
3099
 
3099
3100
 
3100
3101
 
3102
+ @validate_call
3103
+ def leaderboard_leaderboard_id_response_config_put(
3104
+ self,
3105
+ leaderboard_id: StrictStr,
3106
+ update_leaderboard_response_config_model: Optional[UpdateLeaderboardResponseConfigModel] = None,
3107
+ _request_timeout: Union[
3108
+ None,
3109
+ Annotated[StrictFloat, Field(gt=0)],
3110
+ Tuple[
3111
+ Annotated[StrictFloat, Field(gt=0)],
3112
+ Annotated[StrictFloat, Field(gt=0)]
3113
+ ]
3114
+ ] = None,
3115
+ _request_auth: Optional[Dict[StrictStr, Any]] = None,
3116
+ _content_type: Optional[StrictStr] = None,
3117
+ _headers: Optional[Dict[StrictStr, Any]] = None,
3118
+ _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0,
3119
+ ) -> None:
3120
+ """Updates the name of a leaderboard.
3121
+
3122
+
3123
+ :param leaderboard_id: (required)
3124
+ :type leaderboard_id: str
3125
+ :param update_leaderboard_response_config_model:
3126
+ :type update_leaderboard_response_config_model: UpdateLeaderboardResponseConfigModel
3127
+ :param _request_timeout: timeout setting for this request. If one
3128
+ number provided, it will be total request
3129
+ timeout. It can also be a pair (tuple) of
3130
+ (connection, read) timeouts.
3131
+ :type _request_timeout: int, tuple(int, int), optional
3132
+ :param _request_auth: set to override the auth_settings for an a single
3133
+ request; this effectively ignores the
3134
+ authentication in the spec for a single request.
3135
+ :type _request_auth: dict, optional
3136
+ :param _content_type: force content-type for the request.
3137
+ :type _content_type: str, Optional
3138
+ :param _headers: set to override the headers for a single
3139
+ request; this effectively ignores the headers
3140
+ in the spec for a single request.
3141
+ :type _headers: dict, optional
3142
+ :param _host_index: set to override the host_index for a single
3143
+ request; this effectively ignores the host_index
3144
+ in the spec for a single request.
3145
+ :type _host_index: int, optional
3146
+ :return: Returns the result object.
3147
+ """ # noqa: E501
3148
+
3149
+ _param = self._leaderboard_leaderboard_id_response_config_put_serialize(
3150
+ leaderboard_id=leaderboard_id,
3151
+ update_leaderboard_response_config_model=update_leaderboard_response_config_model,
3152
+ _request_auth=_request_auth,
3153
+ _content_type=_content_type,
3154
+ _headers=_headers,
3155
+ _host_index=_host_index
3156
+ )
3157
+
3158
+ _response_types_map: Dict[str, Optional[str]] = {
3159
+ '204': None,
3160
+ }
3161
+ response_data = self.api_client.call_api(
3162
+ *_param,
3163
+ _request_timeout=_request_timeout
3164
+ )
3165
+ response_data.read()
3166
+ return self.api_client.response_deserialize(
3167
+ response_data=response_data,
3168
+ response_types_map=_response_types_map,
3169
+ ).data
3170
+
3171
+
3172
+ @validate_call
3173
+ def leaderboard_leaderboard_id_response_config_put_with_http_info(
3174
+ self,
3175
+ leaderboard_id: StrictStr,
3176
+ update_leaderboard_response_config_model: Optional[UpdateLeaderboardResponseConfigModel] = None,
3177
+ _request_timeout: Union[
3178
+ None,
3179
+ Annotated[StrictFloat, Field(gt=0)],
3180
+ Tuple[
3181
+ Annotated[StrictFloat, Field(gt=0)],
3182
+ Annotated[StrictFloat, Field(gt=0)]
3183
+ ]
3184
+ ] = None,
3185
+ _request_auth: Optional[Dict[StrictStr, Any]] = None,
3186
+ _content_type: Optional[StrictStr] = None,
3187
+ _headers: Optional[Dict[StrictStr, Any]] = None,
3188
+ _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0,
3189
+ ) -> ApiResponse[None]:
3190
+ """Updates the name of a leaderboard.
3191
+
3192
+
3193
+ :param leaderboard_id: (required)
3194
+ :type leaderboard_id: str
3195
+ :param update_leaderboard_response_config_model:
3196
+ :type update_leaderboard_response_config_model: UpdateLeaderboardResponseConfigModel
3197
+ :param _request_timeout: timeout setting for this request. If one
3198
+ number provided, it will be total request
3199
+ timeout. It can also be a pair (tuple) of
3200
+ (connection, read) timeouts.
3201
+ :type _request_timeout: int, tuple(int, int), optional
3202
+ :param _request_auth: set to override the auth_settings for an a single
3203
+ request; this effectively ignores the
3204
+ authentication in the spec for a single request.
3205
+ :type _request_auth: dict, optional
3206
+ :param _content_type: force content-type for the request.
3207
+ :type _content_type: str, Optional
3208
+ :param _headers: set to override the headers for a single
3209
+ request; this effectively ignores the headers
3210
+ in the spec for a single request.
3211
+ :type _headers: dict, optional
3212
+ :param _host_index: set to override the host_index for a single
3213
+ request; this effectively ignores the host_index
3214
+ in the spec for a single request.
3215
+ :type _host_index: int, optional
3216
+ :return: Returns the result object.
3217
+ """ # noqa: E501
3218
+
3219
+ _param = self._leaderboard_leaderboard_id_response_config_put_serialize(
3220
+ leaderboard_id=leaderboard_id,
3221
+ update_leaderboard_response_config_model=update_leaderboard_response_config_model,
3222
+ _request_auth=_request_auth,
3223
+ _content_type=_content_type,
3224
+ _headers=_headers,
3225
+ _host_index=_host_index
3226
+ )
3227
+
3228
+ _response_types_map: Dict[str, Optional[str]] = {
3229
+ '204': None,
3230
+ }
3231
+ response_data = self.api_client.call_api(
3232
+ *_param,
3233
+ _request_timeout=_request_timeout
3234
+ )
3235
+ response_data.read()
3236
+ return self.api_client.response_deserialize(
3237
+ response_data=response_data,
3238
+ response_types_map=_response_types_map,
3239
+ )
3240
+
3241
+
3242
+ @validate_call
3243
+ def leaderboard_leaderboard_id_response_config_put_without_preload_content(
3244
+ self,
3245
+ leaderboard_id: StrictStr,
3246
+ update_leaderboard_response_config_model: Optional[UpdateLeaderboardResponseConfigModel] = None,
3247
+ _request_timeout: Union[
3248
+ None,
3249
+ Annotated[StrictFloat, Field(gt=0)],
3250
+ Tuple[
3251
+ Annotated[StrictFloat, Field(gt=0)],
3252
+ Annotated[StrictFloat, Field(gt=0)]
3253
+ ]
3254
+ ] = None,
3255
+ _request_auth: Optional[Dict[StrictStr, Any]] = None,
3256
+ _content_type: Optional[StrictStr] = None,
3257
+ _headers: Optional[Dict[StrictStr, Any]] = None,
3258
+ _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0,
3259
+ ) -> RESTResponseType:
3260
+ """Updates the name of a leaderboard.
3261
+
3262
+
3263
+ :param leaderboard_id: (required)
3264
+ :type leaderboard_id: str
3265
+ :param update_leaderboard_response_config_model:
3266
+ :type update_leaderboard_response_config_model: UpdateLeaderboardResponseConfigModel
3267
+ :param _request_timeout: timeout setting for this request. If one
3268
+ number provided, it will be total request
3269
+ timeout. It can also be a pair (tuple) of
3270
+ (connection, read) timeouts.
3271
+ :type _request_timeout: int, tuple(int, int), optional
3272
+ :param _request_auth: set to override the auth_settings for an a single
3273
+ request; this effectively ignores the
3274
+ authentication in the spec for a single request.
3275
+ :type _request_auth: dict, optional
3276
+ :param _content_type: force content-type for the request.
3277
+ :type _content_type: str, Optional
3278
+ :param _headers: set to override the headers for a single
3279
+ request; this effectively ignores the headers
3280
+ in the spec for a single request.
3281
+ :type _headers: dict, optional
3282
+ :param _host_index: set to override the host_index for a single
3283
+ request; this effectively ignores the host_index
3284
+ in the spec for a single request.
3285
+ :type _host_index: int, optional
3286
+ :return: Returns the result object.
3287
+ """ # noqa: E501
3288
+
3289
+ _param = self._leaderboard_leaderboard_id_response_config_put_serialize(
3290
+ leaderboard_id=leaderboard_id,
3291
+ update_leaderboard_response_config_model=update_leaderboard_response_config_model,
3292
+ _request_auth=_request_auth,
3293
+ _content_type=_content_type,
3294
+ _headers=_headers,
3295
+ _host_index=_host_index
3296
+ )
3297
+
3298
+ _response_types_map: Dict[str, Optional[str]] = {
3299
+ '204': None,
3300
+ }
3301
+ response_data = self.api_client.call_api(
3302
+ *_param,
3303
+ _request_timeout=_request_timeout
3304
+ )
3305
+ return response_data.response
3306
+
3307
+
3308
+ def _leaderboard_leaderboard_id_response_config_put_serialize(
3309
+ self,
3310
+ leaderboard_id,
3311
+ update_leaderboard_response_config_model,
3312
+ _request_auth,
3313
+ _content_type,
3314
+ _headers,
3315
+ _host_index,
3316
+ ) -> RequestSerialized:
3317
+
3318
+ _host = None
3319
+
3320
+ _collection_formats: Dict[str, str] = {
3321
+ }
3322
+
3323
+ _path_params: Dict[str, str] = {}
3324
+ _query_params: List[Tuple[str, str]] = []
3325
+ _header_params: Dict[str, Optional[str]] = _headers or {}
3326
+ _form_params: List[Tuple[str, str]] = []
3327
+ _files: Dict[
3328
+ str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]]
3329
+ ] = {}
3330
+ _body_params: Optional[bytes] = None
3331
+
3332
+ # process the path parameters
3333
+ if leaderboard_id is not None:
3334
+ _path_params['leaderboardId'] = leaderboard_id
3335
+ # process the query parameters
3336
+ # process the header parameters
3337
+ # process the form parameters
3338
+ # process the body parameter
3339
+ if update_leaderboard_response_config_model is not None:
3340
+ _body_params = update_leaderboard_response_config_model
3341
+
3342
+
3343
+
3344
+ # set the HTTP header `Content-Type`
3345
+ if _content_type:
3346
+ _header_params['Content-Type'] = _content_type
3347
+ else:
3348
+ _default_content_type = (
3349
+ self.api_client.select_header_content_type(
3350
+ [
3351
+ 'application/json',
3352
+ 'text/json',
3353
+ 'application/*+json'
3354
+ ]
3355
+ )
3356
+ )
3357
+ if _default_content_type is not None:
3358
+ _header_params['Content-Type'] = _default_content_type
3359
+
3360
+ # authentication setting
3361
+ _auth_settings: List[str] = [
3362
+ 'bearer',
3363
+ 'oauth2'
3364
+ ]
3365
+
3366
+ return self.api_client.param_serialize(
3367
+ method='PUT',
3368
+ resource_path='/leaderboard/{leaderboardId}/response-config',
3369
+ path_params=_path_params,
3370
+ query_params=_query_params,
3371
+ header_params=_header_params,
3372
+ body=_body_params,
3373
+ post_params=_form_params,
3374
+ files=_files,
3375
+ auth_settings=_auth_settings,
3376
+ collection_formats=_collection_formats,
3377
+ _host=_host,
3378
+ _request_auth=_request_auth
3379
+ )
3380
+
3381
+
3382
+
3383
+
3101
3384
  @validate_call
3102
3385
  def leaderboard_leaderboard_id_runs_get(
3103
3386
  self,
@@ -365,6 +365,7 @@ from rapidata.api_client.models.update_campaign_model import UpdateCampaignModel
365
365
  from rapidata.api_client.models.update_dataset_name_model import UpdateDatasetNameModel
366
366
  from rapidata.api_client.models.update_dimensions_model import UpdateDimensionsModel
367
367
  from rapidata.api_client.models.update_leaderboard_name_model import UpdateLeaderboardNameModel
368
+ from rapidata.api_client.models.update_leaderboard_response_config_model import UpdateLeaderboardResponseConfigModel
368
369
  from rapidata.api_client.models.update_order_name_model import UpdateOrderNameModel
369
370
  from rapidata.api_client.models.update_participant_name_model import UpdateParticipantNameModel
370
371
  from rapidata.api_client.models.update_prompt_tags_model import UpdatePromptTagsModel
@@ -39,8 +39,8 @@ class Filter(BaseModel):
39
39
  if value is None:
40
40
  return value
41
41
 
42
- if value not in set(['Eq', 'Neq', 'Gt', 'Gte', 'Lt', 'Lte', 'Contains', 'StartsWith', 'EndsWith']):
43
- raise ValueError("must be one of enum values ('Eq', 'Neq', 'Gt', 'Gte', 'Lt', 'Lte', 'Contains', 'StartsWith', 'EndsWith')")
42
+ if value not in set(['Eq', 'Neq', 'Gt', 'Gte', 'Lt', 'Lte', 'Contains', 'StartsWith', 'EndsWith', 'In']):
43
+ raise ValueError("must be one of enum values ('Eq', 'Neq', 'Gt', 'Gte', 'Lt', 'Lte', 'Contains', 'StartsWith', 'EndsWith', 'In')")
44
44
  return value
45
45
 
46
46
  @field_validator('logic')
@@ -35,6 +35,7 @@ class FilterOperator(str, Enum):
35
35
  CONTAINS = 'Contains'
36
36
  STARTSWITH = 'StartsWith'
37
37
  ENDSWITH = 'EndsWith'
38
+ IN = 'In'
38
39
 
39
40
  @classmethod
40
41
  def from_json(cls, json_str: str) -> Self:
@@ -34,10 +34,11 @@ class SampleByParticipant(BaseModel):
34
34
  asset: DatapointAsset
35
35
  prompt: Optional[StrictStr] = None
36
36
  prompt_asset: Optional[GetValidationRapidsResultAsset] = Field(default=None, alias="promptAsset")
37
+ tags: List[StrictStr]
37
38
  created_at: Optional[datetime] = Field(default=None, alias="createdAt")
38
39
  owner_id: Optional[StrictStr] = Field(default=None, alias="ownerId")
39
40
  owner_mail: StrictStr = Field(alias="ownerMail")
40
- __properties: ClassVar[List[str]] = ["id", "identifier", "asset", "prompt", "promptAsset", "createdAt", "ownerId", "ownerMail"]
41
+ __properties: ClassVar[List[str]] = ["id", "identifier", "asset", "prompt", "promptAsset", "tags", "createdAt", "ownerId", "ownerMail"]
41
42
 
42
43
  model_config = ConfigDict(
43
44
  populate_by_name=True,
@@ -111,6 +112,7 @@ class SampleByParticipant(BaseModel):
111
112
  "asset": DatapointAsset.from_dict(obj["asset"]) if obj.get("asset") is not None else None,
112
113
  "prompt": obj.get("prompt"),
113
114
  "promptAsset": GetValidationRapidsResultAsset.from_dict(obj["promptAsset"]) if obj.get("promptAsset") is not None else None,
115
+ "tags": obj.get("tags"),
114
116
  "createdAt": obj.get("createdAt"),
115
117
  "ownerId": obj.get("ownerId"),
116
118
  "ownerMail": obj.get("ownerMail")
@@ -0,0 +1,89 @@
1
# coding: utf-8

"""
    Rapidata.Dataset

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)

    The version of the OpenAPI document: v1
    Generated by OpenAPI Generator (https://openapi-generator.tech)

    Do not edit the class manually.
"""  # noqa: E501


from __future__ import annotations
import pprint
import re  # noqa: F401
import json

from pydantic import BaseModel, ConfigDict, Field, StrictInt
from typing import Any, ClassVar, Dict, List
from typing import Optional, Set
from typing_extensions import Self

class UpdateLeaderboardResponseConfigModel(BaseModel):
    """
    The model used to update the response configuration of a leaderboard.
    """  # noqa: E501
    # Total responses collected when onboarding a new participant.
    response_budget: StrictInt = Field(description="The amount of responses that will be collected when onboarding a new participant.", alias="responseBudget")
    # Minimum responses collected per matchup.
    min_responses: StrictInt = Field(description="The amount of responses that will be collected as a minimum on each matchup.", alias="minResponses")
    __properties: ClassVar[List[str]] = ["responseBudget", "minResponses"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )


    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of UpdateLeaderboardResponseConfigModel from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        excluded_fields: Set[str] = set([
        ])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of UpdateLeaderboardResponseConfigModel from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate({
            "responseBudget": obj.get("responseBudget"),
            "minResponses": obj.get("minResponses")
        })
        return _obj
@@ -135,6 +135,7 @@ Class | Method | HTTP request | Description
135
135
  *LeaderboardApi* | [**leaderboard_leaderboard_id_participants_post**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_participants_post) | **POST** /leaderboard/{leaderboardId}/participants | Creates a participant in a leaderboard.
136
136
  *LeaderboardApi* | [**leaderboard_leaderboard_id_prompts_get**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_prompts_get) | **GET** /leaderboard/{leaderboardId}/prompts | returns the paged prompts of a leaderboard by its ID.
137
137
  *LeaderboardApi* | [**leaderboard_leaderboard_id_prompts_post**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_prompts_post) | **POST** /leaderboard/{leaderboardId}/prompts | adds a new prompt to a leaderboard.
138
+ *LeaderboardApi* | [**leaderboard_leaderboard_id_response_config_put**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_response_config_put) | **PUT** /leaderboard/{leaderboardId}/response-config | Updates the response config of a leaderboard.
138
139
  *LeaderboardApi* | [**leaderboard_leaderboard_id_runs_get**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_runs_get) | **GET** /leaderboard/{leaderboardId}/runs | Gets the runs related to a leaderboard
139
140
  *LeaderboardApi* | [**leaderboard_leaderboard_id_standings_get**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_standings_get) | **GET** /leaderboard/{leaderboardId}/standings | queries all the participants connected to leaderboard by its ID.
140
141
  *LeaderboardApi* | [**leaderboard_post**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_post) | **POST** /leaderboard | Creates a new leaderboard with the specified name and criteria.
@@ -563,6 +564,7 @@ Class | Method | HTTP request | Description
563
564
  - [UpdateDatasetNameModel](rapidata/api_client/docs/UpdateDatasetNameModel.md)
564
565
  - [UpdateDimensionsModel](rapidata/api_client/docs/UpdateDimensionsModel.md)
565
566
  - [UpdateLeaderboardNameModel](rapidata/api_client/docs/UpdateLeaderboardNameModel.md)
567
+ - [UpdateLeaderboardResponseConfigModel](rapidata/api_client/docs/UpdateLeaderboardResponseConfigModel.md)
566
568
  - [UpdateOrderNameModel](rapidata/api_client/docs/UpdateOrderNameModel.md)
567
569
  - [UpdateParticipantNameModel](rapidata/api_client/docs/UpdateParticipantNameModel.md)
568
570
  - [UpdatePromptTagsModel](rapidata/api_client/docs/UpdatePromptTagsModel.md)
@@ -0,0 +1,32 @@
1
from typing import Literal


class DetailMapper:
    """Translates between a qualitative level of detail and a numeric response budget."""

    # Budget assigned to each supported level of detail.
    _BUDGETS = {
        "low": 2_000,
        "medium": 4_000,
        "high": 8_000,
        "very high": 16_000,
    }

    @staticmethod
    def get_budget(
        level_of_detail: Literal["low", "medium", "high", "very high"]
    ) -> int:
        """Return the response budget for *level_of_detail*.

        Raises ValueError for any value outside the four supported levels.
        """
        budget = DetailMapper._BUDGETS.get(level_of_detail)
        if budget is None:
            raise ValueError(
                "Invalid level of detail. Must be one of: 'low', 'medium', 'high', 'very high'"
            )
        return budget

    @staticmethod
    def get_level_of_detail(
        response_budget: int,
    ) -> Literal["low", "medium", "high", "very high"]:
        """Return the qualitative level of detail for *response_budget*.

        Thresholds are half-open: budgets below 4000 are "low", below 8000
        "medium", below 16000 "high", and anything else "very high".
        """
        for upper_bound, level in ((4_000, "low"), (8_000, "medium"), (16_000, "high")):
            if response_budget < upper_bound:
                return level
        return "very high"
@@ -1,7 +1,12 @@
1
1
  import pandas as pd
2
- from typing import Optional
2
+ from typing import Literal, Optional
3
3
 
4
+ from rapidata.rapidata_client.logging import logger
5
+ from rapidata.rapidata_client.benchmark._detail_mapper import DetailMapper
4
6
  from rapidata.service.openapi_service import OpenAPIService
7
+ from rapidata.api_client.models.update_leaderboard_response_config_model import (
8
+ UpdateLeaderboardResponseConfigModel,
9
+ )
5
10
 
6
11
 
7
12
  class RapidataLeaderboard:
@@ -17,68 +22,111 @@ class RapidataLeaderboard:
17
22
  id: The ID of the leaderboard.
18
23
  openapi_service: The OpenAPIService instance for API interaction.
19
24
  """
20
- def __init__(self,
21
- name: str,
22
- instruction: str,
23
- show_prompt: bool,
24
- show_prompt_asset: bool,
25
- inverse_ranking: bool,
26
- min_responses: int,
27
- response_budget: int,
28
- id: str,
29
- openapi_service: OpenAPIService):
25
+
26
+ def __init__(
27
+ self,
28
+ name: str,
29
+ instruction: str,
30
+ show_prompt: bool,
31
+ show_prompt_asset: bool,
32
+ inverse_ranking: bool,
33
+ response_budget: int,
34
+ min_responses_per_matchup: int,
35
+ id: str,
36
+ openapi_service: OpenAPIService,
37
+ ):
30
38
  self.__openapi_service = openapi_service
31
39
  self.__name = name
32
40
  self.__instruction = instruction
33
41
  self.__show_prompt = show_prompt
34
42
  self.__show_prompt_asset = show_prompt_asset
35
43
  self.__inverse_ranking = inverse_ranking
36
- self.__min_responses = min_responses
37
44
  self.__response_budget = response_budget
45
+ self.__min_responses_per_matchup = min_responses_per_matchup
38
46
  self.id = id
39
47
 
40
48
  @property
41
- def response_budget(self) -> int:
49
+ def level_of_detail(self) -> Literal["low", "medium", "high", "very high"]:
50
+ """
51
+ Returns the level of detail of the leaderboard.
52
+ """
53
+ return DetailMapper.get_level_of_detail(self.__response_budget)
54
+
55
+ @level_of_detail.setter
56
+ def level_of_detail(
57
+ self, level_of_detail: Literal["low", "medium", "high", "very high"]
58
+ ):
42
59
  """
43
- Returns the response budget of the leaderboard.
60
+ Sets the level of detail of the leaderboard.
44
61
  """
45
- return self.__response_budget
46
-
62
+ logger.debug(f"Setting level of detail to {level_of_detail}")
63
+ self.__openapi_service.leaderboard_api.leaderboard_leaderboard_id_response_config_put(
64
+ leaderboard_id=self.id,
65
+ update_leaderboard_response_config_model=UpdateLeaderboardResponseConfigModel(
66
+ responseBudget=DetailMapper.get_budget(level_of_detail),
67
+ minResponses=self.__min_responses_per_matchup,
68
+ ),
69
+ )
70
+ self.__response_budget = DetailMapper.get_budget(level_of_detail)
71
+
47
72
  @property
48
- def min_responses(self) -> int:
73
+ def min_responses_per_matchup(self) -> int:
49
74
  """
50
75
  Returns the minimum number of responses required to be considered for the leaderboard.
51
76
  """
52
- return self.__min_responses
53
-
77
+ return self.__min_responses_per_matchup
78
+
79
+ @min_responses_per_matchup.setter
80
+ def min_responses_per_matchup(self, min_responses: int):
81
+ """
82
+ Sets the minimum number of responses required to be considered for the leaderboard.
83
+ """
84
+ if not isinstance(min_responses, int):
85
+ raise ValueError("Min responses per matchup must be an integer")
86
+
87
+ if min_responses < 3:
88
+ raise ValueError("Min responses per matchup must be at least 3")
89
+
90
+ logger.debug(
91
+ f"Setting min responses per matchup to {min_responses} for leaderboard {self.name}"
92
+ )
93
+ self.__openapi_service.leaderboard_api.leaderboard_leaderboard_id_response_config_put(
94
+ leaderboard_id=self.id,
95
+ update_leaderboard_response_config_model=UpdateLeaderboardResponseConfigModel(
96
+ responseBudget=self.__response_budget,
97
+ minResponses=min_responses,
98
+ ),
99
+ )
100
+ self.__min_responses_per_matchup = min_responses
101
+
54
102
  @property
55
103
  def show_prompt_asset(self) -> bool:
56
104
  """
57
105
  Returns whether the prompt asset is shown to the users.
58
106
  """
59
107
  return self.__show_prompt_asset
60
-
108
+
61
109
  @property
62
110
  def inverse_ranking(self) -> bool:
63
111
  """
64
112
  Returns whether the ranking is inverse.
65
113
  """
66
114
  return self.__inverse_ranking
67
-
115
+
68
116
  @property
69
117
  def show_prompt(self) -> bool:
70
118
  """
71
119
  Returns whether the prompt is shown to the users.
72
120
  """
73
121
  return self.__show_prompt
74
-
122
+
75
123
  @property
76
124
  def instruction(self) -> str:
77
125
  """
78
126
  Returns the instruction of the leaderboard.
79
127
  """
80
128
  return self.__instruction
81
-
129
+
82
130
  @property
83
131
  def name(self) -> str:
84
132
  """
@@ -100,28 +148,28 @@ class RapidataLeaderboard:
100
148
  """
101
149
 
102
150
  participants = self.__openapi_service.leaderboard_api.leaderboard_leaderboard_id_standings_get(
103
- leaderboard_id=self.id,
104
- tags=tags
151
+ leaderboard_id=self.id, tags=tags
105
152
  )
106
153
 
107
154
  standings = []
108
155
  for participant in participants.items:
109
- standings.append({
110
- "name": participant.name,
111
- "wins": participant.wins,
112
- "total_matches": participant.total_matches,
113
- "score": round(participant.score, 2) if participant.score is not None else None,
114
- })
156
+ standings.append(
157
+ {
158
+ "name": participant.name,
159
+ "wins": participant.wins,
160
+ "total_matches": participant.total_matches,
161
+ "score": (
162
+ round(participant.score, 2)
163
+ if participant.score is not None
164
+ else None
165
+ ),
166
+ }
167
+ )
115
168
 
116
169
  return pd.DataFrame(standings)
117
170
 
118
171
  def __str__(self) -> str:
119
172
  return f"RapidataLeaderboard(name={self.name}, instruction={self.instruction}, show_prompt={self.show_prompt}, leaderboard_id={self.id})"
120
-
173
+
121
174
  def __repr__(self) -> str:
122
175
  return self.__str__()
123
-
124
-
125
-
126
-
127
-
@@ -1,24 +1,34 @@
1
1
  import re
2
- from typing import Optional
2
+ from typing import Literal, Optional
3
3
  from rapidata.api_client.models.root_filter import RootFilter
4
4
  from rapidata.api_client.models.filter import Filter
5
5
  from rapidata.api_client.models.query_model import QueryModel
6
6
  from rapidata.api_client.models.page_info import PageInfo
7
7
  from rapidata.api_client.models.create_leaderboard_model import CreateLeaderboardModel
8
- from rapidata.api_client.models.create_benchmark_participant_model import CreateBenchmarkParticipantModel
8
+ from rapidata.api_client.models.create_benchmark_participant_model import (
9
+ CreateBenchmarkParticipantModel,
10
+ )
9
11
  from rapidata.api_client.models.submit_prompt_model import SubmitPromptModel
10
- from rapidata.api_client.models.submit_prompt_model_prompt_asset import SubmitPromptModelPromptAsset
12
+ from rapidata.api_client.models.submit_prompt_model_prompt_asset import (
13
+ SubmitPromptModelPromptAsset,
14
+ )
11
15
  from rapidata.api_client.models.url_asset_input import UrlAssetInput
12
16
  from rapidata.api_client.models.file_asset_model import FileAssetModel
13
17
  from rapidata.api_client.models.source_url_metadata_model import SourceUrlMetadataModel
14
18
 
15
19
 
16
- from rapidata.rapidata_client.benchmark.participant._participant import BenchmarkParticipant
20
+ from rapidata.rapidata_client.benchmark.participant._participant import (
21
+ BenchmarkParticipant,
22
+ )
17
23
  from rapidata.rapidata_client.logging import logger
18
24
  from rapidata.service.openapi_service import OpenAPIService
19
25
 
20
- from rapidata.rapidata_client.benchmark.leaderboard.rapidata_leaderboard import RapidataLeaderboard
26
+ from rapidata.rapidata_client.benchmark.leaderboard.rapidata_leaderboard import (
27
+ RapidataLeaderboard,
28
+ )
21
29
  from rapidata.rapidata_client.datapoints.assets import MediaAsset
30
+ from rapidata.rapidata_client.benchmark._detail_mapper import DetailMapper
31
+
22
32
 
23
33
  class RapidataBenchmark:
24
34
  """
@@ -31,6 +41,7 @@ class RapidataBenchmark:
31
41
  id: The id of the benchmark.
32
42
  openapi_service: The OpenAPI service to use to interact with the Rapidata API.
33
43
  """
44
+
34
45
  def __init__(self, name: str, id: str, openapi_service: OpenAPIService):
35
46
  self.name = name
36
47
  self.id = id
@@ -40,51 +51,54 @@ class RapidataBenchmark:
40
51
  self.__leaderboards: list[RapidataLeaderboard] = []
41
52
  self.__identifiers: list[str] = []
42
53
  self.__tags: list[list[str]] = []
43
-
54
+
44
55
  def __instantiate_prompts(self) -> None:
45
56
  current_page = 1
46
57
  total_pages = None
47
-
58
+
48
59
  while True:
49
- prompts_result = self.__openapi_service.benchmark_api.benchmark_benchmark_id_prompts_get(
50
- benchmark_id=self.id,
51
- request=QueryModel(
52
- page=PageInfo(
53
- index=current_page,
54
- size=100
55
- )
60
+ prompts_result = (
61
+ self.__openapi_service.benchmark_api.benchmark_benchmark_id_prompts_get(
62
+ benchmark_id=self.id,
63
+ request=QueryModel(page=PageInfo(index=current_page, size=100)),
56
64
  )
57
65
  )
58
-
66
+
59
67
  if prompts_result.total_pages is None:
60
- raise ValueError("An error occurred while fetching prompts: total_pages is None")
61
-
68
+ raise ValueError(
69
+ "An error occurred while fetching prompts: total_pages is None"
70
+ )
71
+
62
72
  total_pages = prompts_result.total_pages
63
-
73
+
64
74
  for prompt in prompts_result.items:
65
75
  self.__prompts.append(prompt.prompt)
66
76
  self.__identifiers.append(prompt.identifier)
67
77
  if prompt.prompt_asset is None:
68
78
  self.__prompt_assets.append(None)
69
79
  else:
70
- assert isinstance(prompt.prompt_asset.actual_instance, FileAssetModel)
71
- source_url = prompt.prompt_asset.actual_instance.metadata["sourceUrl"].actual_instance
80
+ assert isinstance(
81
+ prompt.prompt_asset.actual_instance, FileAssetModel
82
+ )
83
+ source_url = prompt.prompt_asset.actual_instance.metadata[
84
+ "sourceUrl"
85
+ ].actual_instance
72
86
  assert isinstance(source_url, SourceUrlMetadataModel)
73
87
  self.__prompt_assets.append(source_url.url)
74
88
 
75
89
  self.__tags.append(prompt.tags)
76
90
  if current_page >= total_pages:
77
91
  break
78
-
92
+
79
93
  current_page += 1
80
94
 
81
95
  @property
82
96
  def identifiers(self) -> list[str]:
83
97
  if not self.__identifiers:
84
98
  self.__instantiate_prompts()
85
-
99
+
86
100
  return self.__identifiers
87
-
101
+
88
102
  @property
89
103
  def prompts(self) -> list[str | None]:
90
104
  """
@@ -92,9 +106,9 @@ class RapidataBenchmark:
92
106
  """
93
107
  if not self.__prompts:
94
108
  self.__instantiate_prompts()
95
-
109
+
96
110
  return self.__prompts
97
-
111
+
98
112
  @property
99
113
  def prompt_assets(self) -> list[str | None]:
100
114
  """
@@ -102,9 +116,9 @@ class RapidataBenchmark:
102
116
  """
103
117
  if not self.__prompt_assets:
104
118
  self.__instantiate_prompts()
105
-
119
+
106
120
  return self.__prompt_assets
107
-
121
+
108
122
  @property
109
123
  def tags(self) -> list[list[str]]:
110
124
  """
@@ -112,9 +126,9 @@ class RapidataBenchmark:
112
126
  """
113
127
  if not self.__tags:
114
128
  self.__instantiate_prompts()
115
-
129
+
116
130
  return self.__tags
117
-
131
+
118
132
  @property
119
133
  def leaderboards(self) -> list[RapidataLeaderboard]:
120
134
  """
@@ -123,48 +137,63 @@ class RapidataBenchmark:
123
137
  if not self.__leaderboards:
124
138
  current_page = 1
125
139
  total_pages = None
126
-
140
+
127
141
  while True:
128
- leaderboards_result = self.__openapi_service.leaderboard_api.leaderboards_get(
129
- request=QueryModel(
130
- filter=RootFilter(
131
- filters=[
132
- Filter(field="BenchmarkId", operator="Eq", value=self.id)
142
+ leaderboards_result = (
143
+ self.__openapi_service.leaderboard_api.leaderboards_get(
144
+ request=QueryModel(
145
+ filter=RootFilter(
146
+ filters=[
147
+ Filter(
148
+ field="BenchmarkId",
149
+ operator="Eq",
150
+ value=self.id,
151
+ )
133
152
  ]
134
153
  ),
135
- page=PageInfo(
136
- index=current_page,
137
- size=100
154
+ page=PageInfo(index=current_page, size=100),
138
155
  )
139
156
  )
140
157
  )
141
-
158
+
142
159
  if leaderboards_result.total_pages is None:
143
- raise ValueError("An error occurred while fetching leaderboards: total_pages is None")
144
-
160
+ raise ValueError(
161
+ "An error occurred while fetching leaderboards: total_pages is None"
162
+ )
163
+
145
164
  total_pages = leaderboards_result.total_pages
146
-
147
- self.__leaderboards.extend([
148
- RapidataLeaderboard(
149
- leaderboard.name,
150
- leaderboard.instruction,
151
- leaderboard.show_prompt,
152
- leaderboard.show_prompt_asset,
153
- leaderboard.is_inversed,
154
- leaderboard.min_responses,
155
- leaderboard.response_budget,
156
- leaderboard.id,
157
- self.__openapi_service
158
- ) for leaderboard in leaderboards_result.items])
159
-
165
+
166
+ self.__leaderboards.extend(
167
+ [
168
+ RapidataLeaderboard(
169
+ leaderboard.name,
170
+ leaderboard.instruction,
171
+ leaderboard.show_prompt,
172
+ leaderboard.show_prompt_asset,
173
+ leaderboard.is_inversed,
174
+ leaderboard.response_budget,
175
+ leaderboard.min_responses,
176
+ leaderboard.id,
177
+ self.__openapi_service,
178
+ )
179
+ for leaderboard in leaderboards_result.items
180
+ ]
181
+ )
182
+
160
183
  if current_page >= total_pages:
161
184
  break
162
-
185
+
163
186
  current_page += 1
164
-
187
+
165
188
  return self.__leaderboards
166
-
167
- def add_prompt(self, identifier: str, prompt: str | None = None, asset: str | None = None, tags: Optional[list[str]] = None):
189
+
190
+ def add_prompt(
191
+ self,
192
+ identifier: str,
193
+ prompt: str | None = None,
194
+ asset: str | None = None,
195
+ tags: Optional[list[str]] = None,
196
+ ):
168
197
  """
169
198
  Adds a prompt to the benchmark.
170
199
 
@@ -179,25 +208,27 @@ class RapidataBenchmark:
179
208
 
180
209
  if not isinstance(identifier, str):
181
210
  raise ValueError("Identifier must be a string.")
182
-
211
+
183
212
  if prompt is None and asset is None:
184
213
  raise ValueError("Prompt or asset must be provided.")
185
-
214
+
186
215
  if prompt is not None and not isinstance(prompt, str):
187
216
  raise ValueError("Prompt must be a string.")
188
-
217
+
189
218
  if asset is not None and not isinstance(asset, str):
190
219
  raise ValueError("Asset must be a string. That is the link to the asset.")
191
-
220
+
192
221
  if identifier in self.identifiers:
193
222
  raise ValueError("Identifier already exists in the benchmark.")
194
-
195
- if asset is not None and not re.match(r'^https?://', asset):
223
+
224
+ if asset is not None and not re.match(r"^https?://", asset):
196
225
  raise ValueError("Asset must be a link to the asset.")
197
-
198
- if tags is not None and (not isinstance(tags, list) or not all(isinstance(tag, str) for tag in tags)):
226
+
227
+ if tags is not None and (
228
+ not isinstance(tags, list) or not all(isinstance(tag, str) for tag in tags)
229
+ ):
199
230
  raise ValueError("Tags must be a list of strings.")
200
-
231
+
201
232
  self.__identifiers.append(identifier)
202
233
 
203
234
  self.__tags.append(tags)
@@ -209,25 +240,26 @@ class RapidataBenchmark:
209
240
  submit_prompt_model=SubmitPromptModel(
210
241
  identifier=identifier,
211
242
  prompt=prompt,
212
- promptAsset=SubmitPromptModelPromptAsset(
213
- UrlAssetInput(
214
- _t="UrlAssetInput",
215
- url=asset
243
+ promptAsset=(
244
+ SubmitPromptModelPromptAsset(
245
+ UrlAssetInput(_t="UrlAssetInput", url=asset)
216
246
  )
217
- ) if asset is not None else None,
218
- tags=tags
219
- )
247
+ if asset is not None
248
+ else None
249
+ ),
250
+ tags=tags,
251
+ ),
220
252
  )
221
253
 
222
254
  def create_leaderboard(
223
- self,
224
- name: str,
225
- instruction: str,
255
+ self,
256
+ name: str,
257
+ instruction: str,
226
258
  show_prompt: bool = False,
227
259
  show_prompt_asset: bool = False,
228
260
  inverse_ranking: bool = False,
229
- min_responses: int | None = None,
230
- response_budget: int | None = None
261
+ level_of_detail: Literal["low", "medium", "high", "very high"] = "low",
262
+ min_responses_per_matchup: int = 3,
231
263
  ) -> RapidataLeaderboard:
232
264
  """
233
265
  Creates a new leaderboard for the benchmark.
@@ -238,12 +270,14 @@ class RapidataBenchmark:
238
270
  show_prompt: Whether to show the prompt to the users. (default: False)
239
271
  show_prompt_asset: Whether to show the prompt asset to the users. (only works if the prompt asset is a URL) (default: False)
240
272
  inverse_ranking: Whether to inverse the ranking of the leaderboard. (if the question is inversed, e.g. "Which video is worse?")
241
- min_responses: The minimum amount of responses that get collected per comparison. if None, it will be defaulted.
242
- response_budget: The total amount of responses that get collected per new model evaluation. if None, it will be defaulted. Values below 2000 are not recommended.
273
+ level_of_detail: The level of detail of the leaderboard. This will effect how many comparisons are done per model evaluation. (default: "low")
274
+ min_responses_per_matchup: The minimum number of responses required to be considered for the leaderboard. (default: 3)
243
275
  """
276
+ if not isinstance(min_responses_per_matchup, int):
277
+ raise ValueError("Min responses per matchup must be an integer")
244
278
 
245
- if response_budget is not None and response_budget < 2000:
246
- logger.warning("Response budget is below 2000. This is not recommended.")
279
+ if min_responses_per_matchup < 3:
280
+ raise ValueError("Min responses per matchup must be at least 3")
247
281
 
248
282
  leaderboard_result = self.__openapi_service.leaderboard_api.leaderboard_post(
249
283
  create_leaderboard_model=CreateLeaderboardModel(
@@ -253,12 +287,14 @@ class RapidataBenchmark:
253
287
  showPrompt=show_prompt,
254
288
  showPromptAsset=show_prompt_asset,
255
289
  isInversed=inverse_ranking,
256
- minResponses=min_responses,
257
- responseBudget=response_budget
290
+ minResponses=min_responses_per_matchup,
291
+ responseBudget=DetailMapper.get_budget(level_of_detail),
258
292
  )
259
293
  )
260
294
 
261
- assert leaderboard_result.benchmark_id == self.id, "The leaderboard was not created for the correct benchmark."
295
+ assert (
296
+ leaderboard_result.benchmark_id == self.id
297
+ ), "The leaderboard was not created for the correct benchmark."
262
298
 
263
299
  return RapidataLeaderboard(
264
300
  name,
@@ -266,13 +302,15 @@ class RapidataBenchmark:
266
302
  show_prompt,
267
303
  show_prompt_asset,
268
304
  inverse_ranking,
269
- leaderboard_result.min_responses,
270
305
  leaderboard_result.response_budget,
306
+ min_responses_per_matchup,
271
307
  leaderboard_result.id,
272
- self.__openapi_service
308
+ self.__openapi_service,
273
309
  )
274
310
 
275
- def evaluate_model(self, name: str, media: list[str], identifiers: list[str]) -> None:
311
+ def evaluate_model(
312
+ self, name: str, media: list[str], identifiers: list[str]
313
+ ) -> None:
276
314
  """
277
315
  Evaluates a model on the benchmark across all leaderboards.
278
316
 
@@ -284,14 +322,16 @@ class RapidataBenchmark:
284
322
  """
285
323
  if not media:
286
324
  raise ValueError("Media must be a non-empty list of strings")
287
-
325
+
288
326
  if len(media) != len(identifiers):
289
327
  raise ValueError("Media and identifiers must have the same length")
290
-
328
+
291
329
  if not all(identifier in self.identifiers for identifier in identifiers):
292
- raise ValueError("All identifiers must be in the registered identifiers list. To see the registered identifiers, use the identifiers property.\
293
- \nTo see the prompts that are associated with the identifiers, use the prompts property.")
294
-
330
+ raise ValueError(
331
+ "All identifiers must be in the registered identifiers list. To see the registered identifiers, use the identifiers property.\
332
+ \nTo see the prompts that are associated with the identifiers, use the prompts property."
333
+ )
334
+
295
335
  # happens before the creation of the participant to ensure all media paths are valid
296
336
  assets: list[MediaAsset] = []
297
337
  for media_path in media:
@@ -301,12 +341,14 @@ class RapidataBenchmark:
301
341
  benchmark_id=self.id,
302
342
  create_benchmark_participant_model=CreateBenchmarkParticipantModel(
303
343
  name=name,
304
- )
344
+ ),
305
345
  )
306
346
 
307
347
  logger.info(f"Participant created: {participant_result.participant_id}")
308
348
 
309
- participant = BenchmarkParticipant(name, participant_result.participant_id, self.__openapi_service)
349
+ participant = BenchmarkParticipant(
350
+ name, participant_result.participant_id, self.__openapi_service
351
+ )
310
352
 
311
353
  successful_uploads, failed_uploads = participant.upload_media(
312
354
  assets,
@@ -314,15 +356,25 @@ class RapidataBenchmark:
314
356
  )
315
357
 
316
358
  total_uploads = len(assets)
317
- success_rate = (len(successful_uploads) / total_uploads * 100) if total_uploads > 0 else 0
318
- logger.info(f"Upload complete: {len(successful_uploads)} successful, {len(failed_uploads)} failed ({success_rate:.1f}% success rate)")
359
+ success_rate = (
360
+ (len(successful_uploads) / total_uploads * 100) if total_uploads > 0 else 0
361
+ )
362
+ logger.info(
363
+ f"Upload complete: {len(successful_uploads)} successful, {len(failed_uploads)} failed ({success_rate:.1f}% success rate)"
364
+ )
319
365
 
320
366
  if failed_uploads:
321
- logger.error(f"Failed uploads for media: {[asset.path for asset in failed_uploads]}")
322
- logger.warning("Some uploads failed. The model evaluation may be incomplete.")
367
+ logger.error(
368
+ f"Failed uploads for media: {[asset.path for asset in failed_uploads]}"
369
+ )
370
+ logger.warning(
371
+ "Some uploads failed. The model evaluation may be incomplete."
372
+ )
323
373
 
324
374
  if len(successful_uploads) == 0:
325
- raise RuntimeError("No uploads were successful. The model evaluation will not be completed.")
375
+ raise RuntimeError(
376
+ "No uploads were successful. The model evaluation will not be completed."
377
+ )
326
378
 
327
379
  self.__openapi_service.participant_api.participants_participant_id_submit_post(
328
380
  participant_id=participant_result.participant_id
@@ -330,6 +382,6 @@ class RapidataBenchmark:
330
382
 
331
383
  def __str__(self) -> str:
332
384
  return f"RapidataBenchmark(name={self.name}, id={self.id})"
333
-
385
+
334
386
  def __repr__(self) -> str:
335
387
  return self.__str__()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: rapidata
3
- Version: 2.34.2
3
+ Version: 2.35.1
4
4
  Summary: Rapidata package containing the Rapidata Python Client to interact with the Rapidata Web API in an easy way.
5
5
  License: Apache-2.0
6
6
  Author: Rapidata AG
@@ -1,5 +1,5 @@
1
- rapidata/__init__.py,sha256=_kFxSic1rowFfqj5HEJkNdrGtb1oe_MQmhcqPLnRDXg,897
2
- rapidata/api_client/__init__.py,sha256=N4Cx20ScEXZ9A2pdsn8ZPUuhnpeXlGByav3P_1ofQ_8,34733
1
+ rapidata/__init__.py,sha256=7ZoBaYqvNh-HjfP3kpiQCpwmvJYhFGdAVtpwwzyhLsQ,897
2
+ rapidata/api_client/__init__.py,sha256=fb2lqv3sj48wAgarp3g6hvtTgd7bfI01DJJLuBZQnFI,34850
3
3
  rapidata/api_client/api/__init__.py,sha256=dGnSE9oPO_ahGh-E1jtw4_VuM_vQueQFuv0IVMQo6uo,1546
4
4
  rapidata/api_client/api/benchmark_api.py,sha256=bC8hAPgHIDU5u1e0loWPWnZX33BW6gsAR8oc5199q2k,129777
5
5
  rapidata/api_client/api/campaign_api.py,sha256=406gNDALFb0sJhfx727ZM5_0GDX4iB0w5ym2dExLm4g,49894
@@ -12,7 +12,7 @@ rapidata/api_client/api/dataset_api.py,sha256=DUCEfP7jlMAAMdvEa-47xq0mq3MGcyk4DA
12
12
  rapidata/api_client/api/evaluation_workflow_api.py,sha256=E0Phmx54jzXx7LZYGquTqzZSrX2aE5PS9rAs5HdDjvs,15151
13
13
  rapidata/api_client/api/feedback_api.py,sha256=-ZI2-1HtQ7wAzBKClgXMmMHtYdgoZtWrpQql3p51qp0,11589
14
14
  rapidata/api_client/api/identity_api.py,sha256=LmK6cTXssNjCa1BteOMc8P4FsyRiHQ_Kr30vmWIAYko,55093
15
- rapidata/api_client/api/leaderboard_api.py,sha256=M82K7VI72c72Ij1_Svz7qm_ttVDuAilSi0g9B4BuU4g,167604
15
+ rapidata/api_client/api/leaderboard_api.py,sha256=wGnMTeaYsyeWwvptdD5R3OJK42ZJWQhyJvwDmN3bqQc,179275
16
16
  rapidata/api_client/api/newsletter_api.py,sha256=3NU6HO5Gtm-RH-nx5hcp2CCE4IZmWHwTfCLMMz-Xpq4,22655
17
17
  rapidata/api_client/api/order_api.py,sha256=6hD7a_8LVGuGdT_k1lE-gQKCWcSAcFMJO5Nsdc8xgbM,214715
18
18
  rapidata/api_client/api/participant_api.py,sha256=eOoZDCRF61bmPAcoMWhOREgUp3rEYhrQLEPJq8o27b4,87709
@@ -29,7 +29,7 @@ rapidata/api_client/api_client.py,sha256=EDhxAOUc5JFWvFsF1zc726Q7GoEFkuB8uor5SlG
29
29
  rapidata/api_client/api_response.py,sha256=eMxw1mpmJcoGZ3gs9z6jM4oYoZ10Gjk333s9sKxGv7s,652
30
30
  rapidata/api_client/configuration.py,sha256=g472vHVPLBotq8EkfSXP4sbp7xnn-3sb8O8BBlRWK1I,15931
31
31
  rapidata/api_client/exceptions.py,sha256=eLLd1fxM0Ygf3IIG6aNx9hdy79drst5Cem0UjI_NamM,5978
32
- rapidata/api_client/models/__init__.py,sha256=1mQliyAedV6ftD2vpUt5Dnv_E8JyeCFkSMZB_YMutC4,32646
32
+ rapidata/api_client/models/__init__.py,sha256=f6lXxDLfQKlvw0DZddgyzBZn3jEFyt5OBIitESxPbtQ,32763
33
33
  rapidata/api_client/models/ab_test_selection.py,sha256=xQcE1BgKSnkTcmIuroeVOAQcAhGkHLlMP9XjakMFgDc,4327
34
34
  rapidata/api_client/models/ab_test_selection_a_inner.py,sha256=VsCi27NBGxAtupB_sQZCzUEsTNNgSGV_Mo-Fi0UY1Jw,11657
35
35
  rapidata/api_client/models/add_campaign_artifact_result.py,sha256=4IvFVS-tLlL6eHsWp-IZ_ul5T30-h3YEwd2B5ioBbgY,2582
@@ -212,8 +212,8 @@ rapidata/api_client/models/file_asset_model_metadata_value.py,sha256=KiA2BENesZt
212
212
  rapidata/api_client/models/file_type.py,sha256=t7qrK5oEaVWWg7pis5mO2BviaVNuAUh7O1SNib_kfGw,783
213
213
  rapidata/api_client/models/file_type_metadata.py,sha256=qvRJ0SWEy2mPfvjGAc1GV5AMRjOm4EMXRAQ-sTupv6Q,3412
214
214
  rapidata/api_client/models/file_type_metadata_model.py,sha256=1BHfr0wk3HQSNmMTDWdROPtLsPwE88Z2D1YaEnOte7E,3308
215
- rapidata/api_client/models/filter.py,sha256=-gje_jVjRXyeguRj9yGuHgyi53hn871Z_2ULs_5yVr8,4541
216
- rapidata/api_client/models/filter_operator.py,sha256=hVWuGOU6iseDxdJCG4E-b5Pbb8w3rCLT26ukvmwx6G0,889
215
+ rapidata/api_client/models/filter.py,sha256=cIvVdYntcGLQLgW0iNTqGdEj_jJQQBcMt9pF_l_FVug,4553
216
+ rapidata/api_client/models/filter_operator.py,sha256=zPAPopNy6Jlw-3Gz-tEsape0SyriGvfXREij57bNdP4,903
217
217
  rapidata/api_client/models/form_file_wrapper.py,sha256=1KSzAKKGE_-yXTyIyjHi5ctc_CrGCRfkX3EMqlW0IFA,4208
218
218
  rapidata/api_client/models/free_text_payload.py,sha256=u5p0ybMMOH2ecmzT3FauQs0LgQgLNnUOD8Kk1umOqtw,2952
219
219
  rapidata/api_client/models/free_text_rapid_blueprint.py,sha256=Tpf2durJj9olZbI-QPl89BiLESrOs9fDbATpch8tRj4,2988
@@ -425,7 +425,7 @@ rapidata/api_client/models/root_filter.py,sha256=oBtXjKE0i3m_HmD1XeHwaLCFFQRkpkW
425
425
  rapidata/api_client/models/run_status.py,sha256=wtGbdMPDcpR35pMbczVABkYfERTWnocMon2s-uaaaUM,798
426
426
  rapidata/api_client/models/runs_by_leaderboard_result.py,sha256=5sRSV7d9rxjBbdTnWk10ke1GW4jVKZ8O5iSMclthryk,3581
427
427
  rapidata/api_client/models/runs_by_leaderboard_result_paged_result.py,sha256=Ts5gkkQLKIbISLay6xlVYdu0vyXxAwtY7mwQMKobC-Y,3559
428
- rapidata/api_client/models/sample_by_participant.py,sha256=N1DHLru8cPit7-sT_8a1Ein47slvtH6UsJwPUYmZjgs,4437
428
+ rapidata/api_client/models/sample_by_participant.py,sha256=-CxSOHphruBeeSTblX4yrERL4o7PFx4ofnfyl9JMcko,4508
429
429
  rapidata/api_client/models/sample_by_participant_paged_result.py,sha256=FKobGmMEymLQwRbwdS5R-nox_AtFAxg9CuVopWNQgTk,3526
430
430
  rapidata/api_client/models/scrub_payload.py,sha256=tX-QU_a8GUQWBPb1GofGLFupucZF5TY2LUpqdyfHDSI,2920
431
431
  rapidata/api_client/models/scrub_range.py,sha256=2P__eZ4HeAxWcjFkp-p938Ih8GHf0rJea18sIGxUN0A,2527
@@ -494,6 +494,7 @@ rapidata/api_client/models/update_campaign_model.py,sha256=9mCp3fQZqtecsCQS1SBbf
494
494
  rapidata/api_client/models/update_dataset_name_model.py,sha256=IeGRrxo6G2fKfMwnoBFYj4BW9ksGo_8db21VcLYfLvc,2599
495
495
  rapidata/api_client/models/update_dimensions_model.py,sha256=jDg2114Y14AxcQHg_C6oTxMZ17Cq4vEQRtzeNjN70dk,2591
496
496
  rapidata/api_client/models/update_leaderboard_name_model.py,sha256=QmEy8MNkrQYyJtXR91MKN1j511YFIIW-ovN3MxvFoWw,2614
497
+ rapidata/api_client/models/update_leaderboard_response_config_model.py,sha256=Jkp3zS358CD1EPhknNp1voUOAtuQFPBstlw1SMJc6JI,2978
497
498
  rapidata/api_client/models/update_order_model.py,sha256=RUlxnzLqO6o-w5EEPb8wv1ANRKpkSbs8PhGM42T35uw,2570
498
499
  rapidata/api_client/models/update_order_name_model.py,sha256=Cm8qZUJKgx1JTgkhlJcVNdLwPnRV8gqeeo7G4bVDOS4,2582
499
500
  rapidata/api_client/models/update_participant_name_model.py,sha256=PPXeS5euTpMt7QrmLWordYU1tGS1gZ-zwjgQDglld_g,2614
@@ -536,16 +537,17 @@ rapidata/api_client/models/workflow_split_model_filter_configs_inner.py,sha256=1
536
537
  rapidata/api_client/models/workflow_state.py,sha256=5LAK1se76RCoozeVB6oxMPb8p_5bhLZJqn7q5fFQWis,850
537
538
  rapidata/api_client/models/zip_entry_file_wrapper.py,sha256=06CoNJD3x511K3rnSmkrwwhc9GbQxwxF-c0ldOyJbAs,4240
538
539
  rapidata/api_client/rest.py,sha256=rtIMcgINZOUaDFaJIinJkXRSddNJmXvMRMfgO2Ezk2o,10835
539
- rapidata/api_client_README.md,sha256=hFJwQC9pI9JtopNmka-9TUPcRqhY3zhhrZj0ebJizVs,62286
540
+ rapidata/api_client_README.md,sha256=5k-14YuFPSGEpYGJsIL0xwyEWeIvuWd3VOI7jFYw_dc,62649
540
541
  rapidata/rapidata_client/__init__.py,sha256=CfkQxCdURXzJsVP6sxKmufze2u-IE_snG_G8NEkE_JM,1225
541
542
  rapidata/rapidata_client/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
542
543
  rapidata/rapidata_client/api/rapidata_exception.py,sha256=BIdmHRrJUGW-Mqhp1H_suemZaR6w9TgjWq-ZW5iUPdQ,3878
543
544
  rapidata/rapidata_client/benchmark/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
545
+ rapidata/rapidata_client/benchmark/_detail_mapper.py,sha256=HmzJwR2dojs0c2PaEJ5lwSPtjaqeoPXI7RQk5kwSB9A,944
544
546
  rapidata/rapidata_client/benchmark/leaderboard/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
545
- rapidata/rapidata_client/benchmark/leaderboard/rapidata_leaderboard.py,sha256=PKqcW_pQJLmNg_feLDCtF-9SufMw2_EU4yKFo6QxxR0,3950
547
+ rapidata/rapidata_client/benchmark/leaderboard/rapidata_leaderboard.py,sha256=xRNXw__K4-4wb4UI-EgLyJRwGO5OkFdoN3A1S3MkdF4,6092
546
548
  rapidata/rapidata_client/benchmark/participant/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
547
549
  rapidata/rapidata_client/benchmark/participant/_participant.py,sha256=yN82EWrZXYszsM8Ns0HRMXCTivltkyxcpGRK-cdT01Y,3683
548
- rapidata/rapidata_client/benchmark/rapidata_benchmark.py,sha256=qk8Y0ShIawtSNsxhp3rZ-OOCFuh4Pl_w7K9IRnTNt3Y,14131
550
+ rapidata/rapidata_client/benchmark/rapidata_benchmark.py,sha256=Z0jT9hiWyS3km0BwtGXbdmtopdnecf1z-ucDdwg06Y0,14793
549
551
  rapidata/rapidata_client/benchmark/rapidata_benchmark_manager.py,sha256=NnXCsU2_8SlGxdI51MeIUWK7Ku0pEylDSPhnzOPzTV0,5291
550
552
  rapidata/rapidata_client/country_codes/__init__.py,sha256=FB9Dcks44J6C6YBSYmTmNZ71tE130x6NO_3aLJ8fKzQ,40
551
553
  rapidata/rapidata_client/country_codes/country_codes.py,sha256=ePHqeb7y9DWQZAnddBzPx1puYBcrgUjdR2sbFijuFD8,283
@@ -649,7 +651,7 @@ rapidata/service/__init__.py,sha256=s9bS1AJZaWIhLtJX_ZA40_CK39rAAkwdAmymTMbeWl4,
649
651
  rapidata/service/credential_manager.py,sha256=pUEEtp6VrFWYhfUUtyqmS0AlRqe2Y0kFkY6o22IT4KM,8682
650
652
  rapidata/service/local_file_service.py,sha256=pgorvlWcx52Uh3cEG6VrdMK_t__7dacQ_5AnfY14BW8,877
651
653
  rapidata/service/openapi_service.py,sha256=v2fhPbHmD0J11ZRZY6f80PdIdGwpRFlbfMH9t8Ypg5A,5403
652
- rapidata-2.34.2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
653
- rapidata-2.34.2.dist-info/METADATA,sha256=7bXBvmc-1hRDkklCi2EvcLlAelpTVVzghPj7cJdV6hc,1264
654
- rapidata-2.34.2.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
655
- rapidata-2.34.2.dist-info/RECORD,,
654
+ rapidata-2.35.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
655
+ rapidata-2.35.1.dist-info/METADATA,sha256=mQGouVcYnBiJg-noTAW2JpQPxqSId68ZqIznzJDyJWE,1264
656
+ rapidata-2.35.1.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
657
+ rapidata-2.35.1.dist-info/RECORD,,