rapidata 2.34.1__py3-none-any.whl → 2.35.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of rapidata might be problematic. Click here for more details.

rapidata/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
- __version__ = "2.34.1"
1
+ __version__ = "2.35.0"
2
2
 
3
3
  from .rapidata_client import (
4
4
  RapidataClient,
@@ -404,6 +404,7 @@ from rapidata.api_client.models.update_campaign_model import UpdateCampaignModel
404
404
  from rapidata.api_client.models.update_dataset_name_model import UpdateDatasetNameModel
405
405
  from rapidata.api_client.models.update_dimensions_model import UpdateDimensionsModel
406
406
  from rapidata.api_client.models.update_leaderboard_name_model import UpdateLeaderboardNameModel
407
+ from rapidata.api_client.models.update_leaderboard_response_config_model import UpdateLeaderboardResponseConfigModel
407
408
  from rapidata.api_client.models.update_order_name_model import UpdateOrderNameModel
408
409
  from rapidata.api_client.models.update_participant_name_model import UpdateParticipantNameModel
409
410
  from rapidata.api_client.models.update_prompt_tags_model import UpdatePromptTagsModel
@@ -34,6 +34,7 @@ from rapidata.api_client.models.standing_by_leaderboard_paged_result import Stan
34
34
  from rapidata.api_client.models.standings_by_leaderboard_result import StandingsByLeaderboardResult
35
35
  from rapidata.api_client.models.submit_participant_result import SubmitParticipantResult
36
36
  from rapidata.api_client.models.update_leaderboard_name_model import UpdateLeaderboardNameModel
37
+ from rapidata.api_client.models.update_leaderboard_response_config_model import UpdateLeaderboardResponseConfigModel
37
38
 
38
39
  from rapidata.api_client.api_client import ApiClient, RequestSerialized
39
40
  from rapidata.api_client.api_response import ApiResponse
@@ -3098,6 +3099,288 @@ class LeaderboardApi:
3098
3099
 
3099
3100
 
3100
3101
 
3102
    @validate_call
    def leaderboard_leaderboard_id_response_config_put(
        self,
        leaderboard_id: StrictStr,
        update_leaderboard_response_config_model: Optional[UpdateLeaderboardResponseConfigModel] = None,
        _request_timeout: Union[
            None,
            Annotated[StrictFloat, Field(gt=0)],
            Tuple[
                Annotated[StrictFloat, Field(gt=0)],
                Annotated[StrictFloat, Field(gt=0)]
            ]
        ] = None,
        _request_auth: Optional[Dict[StrictStr, Any]] = None,
        _content_type: Optional[StrictStr] = None,
        _headers: Optional[Dict[StrictStr, Any]] = None,
        _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0,
    ) -> None:
        """Updates the response config of a leaderboard.

        Issues ``PUT /leaderboard/{leaderboardId}/response-config`` and expects
        a ``204 No Content`` reply, so the deserialized result is ``None``.

        :param leaderboard_id: (required)
        :type leaderboard_id: str
        :param update_leaderboard_response_config_model:
        :type update_leaderboard_response_config_model: UpdateLeaderboardResponseConfigModel
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :type _request_timeout: int, tuple(int, int), optional
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the
                              authentication in the spec for a single request.
        :type _request_auth: dict, optional
        :param _content_type: force content-type for the request.
        :type _content_type: str, Optional
        :param _headers: set to override the headers for a single
                         request; this effectively ignores the headers
                         in the spec for a single request.
        :type _headers: dict, optional
        :param _host_index: set to override the host_index for a single
                            request; this effectively ignores the host_index
                            in the spec for a single request.
        :type _host_index: int, optional
        :return: Returns the result object.
        """ # noqa: E501

        _param = self._leaderboard_leaderboard_id_response_config_put_serialize(
            leaderboard_id=leaderboard_id,
            update_leaderboard_response_config_model=update_leaderboard_response_config_model,
            _request_auth=_request_auth,
            _content_type=_content_type,
            _headers=_headers,
            _host_index=_host_index
        )

        # Only a 204 (no body) is expected on success.
        _response_types_map: Dict[str, Optional[str]] = {
            '204': None,
        }
        response_data = self.api_client.call_api(
            *_param,
            _request_timeout=_request_timeout
        )
        response_data.read()
        return self.api_client.response_deserialize(
            response_data=response_data,
            response_types_map=_response_types_map,
        ).data
3170
+
3171
+
3172
    @validate_call
    def leaderboard_leaderboard_id_response_config_put_with_http_info(
        self,
        leaderboard_id: StrictStr,
        update_leaderboard_response_config_model: Optional[UpdateLeaderboardResponseConfigModel] = None,
        _request_timeout: Union[
            None,
            Annotated[StrictFloat, Field(gt=0)],
            Tuple[
                Annotated[StrictFloat, Field(gt=0)],
                Annotated[StrictFloat, Field(gt=0)]
            ]
        ] = None,
        _request_auth: Optional[Dict[StrictStr, Any]] = None,
        _content_type: Optional[StrictStr] = None,
        _headers: Optional[Dict[StrictStr, Any]] = None,
        _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0,
    ) -> ApiResponse[None]:
        """Updates the response config of a leaderboard.

        Same as :meth:`leaderboard_leaderboard_id_response_config_put` but
        returns the full :class:`ApiResponse` (status, headers, data) instead
        of just the deserialized body.

        :param leaderboard_id: (required)
        :type leaderboard_id: str
        :param update_leaderboard_response_config_model:
        :type update_leaderboard_response_config_model: UpdateLeaderboardResponseConfigModel
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :type _request_timeout: int, tuple(int, int), optional
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the
                              authentication in the spec for a single request.
        :type _request_auth: dict, optional
        :param _content_type: force content-type for the request.
        :type _content_type: str, Optional
        :param _headers: set to override the headers for a single
                         request; this effectively ignores the headers
                         in the spec for a single request.
        :type _headers: dict, optional
        :param _host_index: set to override the host_index for a single
                            request; this effectively ignores the host_index
                            in the spec for a single request.
        :type _host_index: int, optional
        :return: Returns the result object.
        """ # noqa: E501

        _param = self._leaderboard_leaderboard_id_response_config_put_serialize(
            leaderboard_id=leaderboard_id,
            update_leaderboard_response_config_model=update_leaderboard_response_config_model,
            _request_auth=_request_auth,
            _content_type=_content_type,
            _headers=_headers,
            _host_index=_host_index
        )

        # Only a 204 (no body) is expected on success.
        _response_types_map: Dict[str, Optional[str]] = {
            '204': None,
        }
        response_data = self.api_client.call_api(
            *_param,
            _request_timeout=_request_timeout
        )
        response_data.read()
        return self.api_client.response_deserialize(
            response_data=response_data,
            response_types_map=_response_types_map,
        )
3240
+
3241
+
3242
    @validate_call
    def leaderboard_leaderboard_id_response_config_put_without_preload_content(
        self,
        leaderboard_id: StrictStr,
        update_leaderboard_response_config_model: Optional[UpdateLeaderboardResponseConfigModel] = None,
        _request_timeout: Union[
            None,
            Annotated[StrictFloat, Field(gt=0)],
            Tuple[
                Annotated[StrictFloat, Field(gt=0)],
                Annotated[StrictFloat, Field(gt=0)]
            ]
        ] = None,
        _request_auth: Optional[Dict[StrictStr, Any]] = None,
        _content_type: Optional[StrictStr] = None,
        _headers: Optional[Dict[StrictStr, Any]] = None,
        _host_index: Annotated[StrictInt, Field(ge=0, le=0)] = 0,
    ) -> RESTResponseType:
        """Updates the response config of a leaderboard.

        Same as :meth:`leaderboard_leaderboard_id_response_config_put` but
        returns the raw HTTP response without reading (preloading) its body;
        the caller is responsible for consuming the response.

        :param leaderboard_id: (required)
        :type leaderboard_id: str
        :param update_leaderboard_response_config_model:
        :type update_leaderboard_response_config_model: UpdateLeaderboardResponseConfigModel
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :type _request_timeout: int, tuple(int, int), optional
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the
                              authentication in the spec for a single request.
        :type _request_auth: dict, optional
        :param _content_type: force content-type for the request.
        :type _content_type: str, Optional
        :param _headers: set to override the headers for a single
                         request; this effectively ignores the headers
                         in the spec for a single request.
        :type _headers: dict, optional
        :param _host_index: set to override the host_index for a single
                            request; this effectively ignores the host_index
                            in the spec for a single request.
        :type _host_index: int, optional
        :return: Returns the result object.
        """ # noqa: E501

        _param = self._leaderboard_leaderboard_id_response_config_put_serialize(
            leaderboard_id=leaderboard_id,
            update_leaderboard_response_config_model=update_leaderboard_response_config_model,
            _request_auth=_request_auth,
            _content_type=_content_type,
            _headers=_headers,
            _host_index=_host_index
        )

        # Only a 204 (no body) is expected on success.
        _response_types_map: Dict[str, Optional[str]] = {
            '204': None,
        }
        response_data = self.api_client.call_api(
            *_param,
            _request_timeout=_request_timeout
        )
        return response_data.response
3306
+
3307
+
3308
    def _leaderboard_leaderboard_id_response_config_put_serialize(
        self,
        leaderboard_id,
        update_leaderboard_response_config_model,
        _request_auth,
        _content_type,
        _headers,
        _host_index,
    ) -> RequestSerialized:
        """Build the serialized ``PUT /leaderboard/{leaderboardId}/response-config``
        request (path/query/header/body parameters, content type and auth) for
        the shared ``api_client``.
        """

        _host = None

        _collection_formats: Dict[str, str] = {
        }

        _path_params: Dict[str, str] = {}
        _query_params: List[Tuple[str, str]] = []
        _header_params: Dict[str, Optional[str]] = _headers or {}
        _form_params: List[Tuple[str, str]] = []
        _files: Dict[
            str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]]
        ] = {}
        _body_params: Optional[bytes] = None

        # process the path parameters
        if leaderboard_id is not None:
            _path_params['leaderboardId'] = leaderboard_id
        # process the query parameters
        # process the header parameters
        # process the form parameters
        # process the body parameter
        if update_leaderboard_response_config_model is not None:
            _body_params = update_leaderboard_response_config_model



        # set the HTTP header `Content-Type`
        if _content_type:
            _header_params['Content-Type'] = _content_type
        else:
            _default_content_type = (
                self.api_client.select_header_content_type(
                    [
                        'application/json',
                        'text/json',
                        'application/*+json'
                    ]
                )
            )
            if _default_content_type is not None:
                _header_params['Content-Type'] = _default_content_type

        # authentication setting
        _auth_settings: List[str] = [
            'bearer',
            'oauth2'
        ]

        return self.api_client.param_serialize(
            method='PUT',
            resource_path='/leaderboard/{leaderboardId}/response-config',
            path_params=_path_params,
            query_params=_query_params,
            header_params=_header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            auth_settings=_auth_settings,
            collection_formats=_collection_formats,
            _host=_host,
            _request_auth=_request_auth
        )
3380
+
3381
+
3382
+
3383
+
3101
3384
  @validate_call
3102
3385
  def leaderboard_leaderboard_id_runs_get(
3103
3386
  self,
@@ -365,6 +365,7 @@ from rapidata.api_client.models.update_campaign_model import UpdateCampaignModel
365
365
  from rapidata.api_client.models.update_dataset_name_model import UpdateDatasetNameModel
366
366
  from rapidata.api_client.models.update_dimensions_model import UpdateDimensionsModel
367
367
  from rapidata.api_client.models.update_leaderboard_name_model import UpdateLeaderboardNameModel
368
+ from rapidata.api_client.models.update_leaderboard_response_config_model import UpdateLeaderboardResponseConfigModel
368
369
  from rapidata.api_client.models.update_order_name_model import UpdateOrderNameModel
369
370
  from rapidata.api_client.models.update_participant_name_model import UpdateParticipantNameModel
370
371
  from rapidata.api_client.models.update_prompt_tags_model import UpdatePromptTagsModel
@@ -39,8 +39,8 @@ class Filter(BaseModel):
39
39
  if value is None:
40
40
  return value
41
41
 
42
- if value not in set(['Eq', 'Neq', 'Gt', 'Gte', 'Lt', 'Lte', 'Contains', 'StartsWith', 'EndsWith']):
43
- raise ValueError("must be one of enum values ('Eq', 'Neq', 'Gt', 'Gte', 'Lt', 'Lte', 'Contains', 'StartsWith', 'EndsWith')")
42
+ if value not in set(['Eq', 'Neq', 'Gt', 'Gte', 'Lt', 'Lte', 'Contains', 'StartsWith', 'EndsWith', 'In']):
43
+ raise ValueError("must be one of enum values ('Eq', 'Neq', 'Gt', 'Gte', 'Lt', 'Lte', 'Contains', 'StartsWith', 'EndsWith', 'In')")
44
44
  return value
45
45
 
46
46
  @field_validator('logic')
@@ -35,6 +35,7 @@ class FilterOperator(str, Enum):
35
35
  CONTAINS = 'Contains'
36
36
  STARTSWITH = 'StartsWith'
37
37
  ENDSWITH = 'EndsWith'
38
+ IN = 'In'
38
39
 
39
40
  @classmethod
40
41
  def from_json(cls, json_str: str) -> Self:
@@ -34,10 +34,11 @@ class SampleByParticipant(BaseModel):
34
34
  asset: DatapointAsset
35
35
  prompt: Optional[StrictStr] = None
36
36
  prompt_asset: Optional[GetValidationRapidsResultAsset] = Field(default=None, alias="promptAsset")
37
+ tags: List[StrictStr]
37
38
  created_at: Optional[datetime] = Field(default=None, alias="createdAt")
38
39
  owner_id: Optional[StrictStr] = Field(default=None, alias="ownerId")
39
40
  owner_mail: StrictStr = Field(alias="ownerMail")
40
- __properties: ClassVar[List[str]] = ["id", "identifier", "asset", "prompt", "promptAsset", "createdAt", "ownerId", "ownerMail"]
41
+ __properties: ClassVar[List[str]] = ["id", "identifier", "asset", "prompt", "promptAsset", "tags", "createdAt", "ownerId", "ownerMail"]
41
42
 
42
43
  model_config = ConfigDict(
43
44
  populate_by_name=True,
@@ -111,6 +112,7 @@ class SampleByParticipant(BaseModel):
111
112
  "asset": DatapointAsset.from_dict(obj["asset"]) if obj.get("asset") is not None else None,
112
113
  "prompt": obj.get("prompt"),
113
114
  "promptAsset": GetValidationRapidsResultAsset.from_dict(obj["promptAsset"]) if obj.get("promptAsset") is not None else None,
115
+ "tags": obj.get("tags"),
114
116
  "createdAt": obj.get("createdAt"),
115
117
  "ownerId": obj.get("ownerId"),
116
118
  "ownerMail": obj.get("ownerMail")
@@ -0,0 +1,89 @@
1
+ # coding: utf-8
2
+
3
+ """
4
+ Rapidata.Dataset
5
+
6
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
7
+
8
+ The version of the OpenAPI document: v1
9
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
10
+
11
+ Do not edit the class manually.
12
+ """ # noqa: E501
13
+
14
+
15
+ from __future__ import annotations
16
+ import pprint
17
+ import re # noqa: F401
18
+ import json
19
+
20
+ from pydantic import BaseModel, ConfigDict, Field, StrictInt
21
+ from typing import Any, ClassVar, Dict, List
22
+ from typing import Optional, Set
23
+ from typing_extensions import Self
24
+
25
class UpdateLeaderboardResponseConfigModel(BaseModel):
    """
    The model used to update the response config of a leaderboard.
    """ # noqa: E501
    response_budget: StrictInt = Field(description="The amount of responses that will be collected when onboarding a new participant.", alias="responseBudget")
    min_responses: StrictInt = Field(description="The amount of responses that will be collected as a minimum on each matchup.", alias="minResponses")
    # Ordered alias names used by to_dict/from_dict round-tripping.
    __properties: ClassVar[List[str]] = ["responseBudget", "minResponses"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )


    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of UpdateLeaderboardResponseConfigModel from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        excluded_fields: Set[str] = set([
        ])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of UpdateLeaderboardResponseConfigModel from a dict"""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate({
            "responseBudget": obj.get("responseBudget"),
            "minResponses": obj.get("minResponses")
        })
        return _obj
88
+
89
+
@@ -135,6 +135,7 @@ Class | Method | HTTP request | Description
135
135
  *LeaderboardApi* | [**leaderboard_leaderboard_id_participants_post**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_participants_post) | **POST** /leaderboard/{leaderboardId}/participants | Creates a participant in a leaderboard.
136
136
  *LeaderboardApi* | [**leaderboard_leaderboard_id_prompts_get**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_prompts_get) | **GET** /leaderboard/{leaderboardId}/prompts | returns the paged prompts of a leaderboard by its ID.
137
137
  *LeaderboardApi* | [**leaderboard_leaderboard_id_prompts_post**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_prompts_post) | **POST** /leaderboard/{leaderboardId}/prompts | adds a new prompt to a leaderboard.
138
+ *LeaderboardApi* | [**leaderboard_leaderboard_id_response_config_put**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_response_config_put) | **PUT** /leaderboard/{leaderboardId}/response-config | Updates the response config of a leaderboard.
138
139
  *LeaderboardApi* | [**leaderboard_leaderboard_id_runs_get**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_runs_get) | **GET** /leaderboard/{leaderboardId}/runs | Gets the runs related to a leaderboard
139
140
  *LeaderboardApi* | [**leaderboard_leaderboard_id_standings_get**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_standings_get) | **GET** /leaderboard/{leaderboardId}/standings | queries all the participants connected to leaderboard by its ID.
140
141
  *LeaderboardApi* | [**leaderboard_post**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_post) | **POST** /leaderboard | Creates a new leaderboard with the specified name and criteria.
@@ -563,6 +564,7 @@ Class | Method | HTTP request | Description
563
564
  - [UpdateDatasetNameModel](rapidata/api_client/docs/UpdateDatasetNameModel.md)
564
565
  - [UpdateDimensionsModel](rapidata/api_client/docs/UpdateDimensionsModel.md)
565
566
  - [UpdateLeaderboardNameModel](rapidata/api_client/docs/UpdateLeaderboardNameModel.md)
567
+ - [UpdateLeaderboardResponseConfigModel](rapidata/api_client/docs/UpdateLeaderboardResponseConfigModel.md)
566
568
  - [UpdateOrderNameModel](rapidata/api_client/docs/UpdateOrderNameModel.md)
567
569
  - [UpdateParticipantNameModel](rapidata/api_client/docs/UpdateParticipantNameModel.md)
568
570
  - [UpdatePromptTagsModel](rapidata/api_client/docs/UpdatePromptTagsModel.md)
@@ -0,0 +1,34 @@
1
from typing import Literal


class DetailMapper:
    """Maps a human-friendly level of detail to a response budget and back.

    The two static methods are inverses for the canonical budgets:
    ``get_level_of_detail(get_budget(level)) == level`` for every level.
    """

    # Minimum number of responses collected per matchup, independent of budget.
    MIN_RESPONSES = 3

    # level of detail -> response budget, in ascending budget order.
    _BUDGETS: dict[str, int] = {
        "low": 2_000,
        "medium": 4_000,
        "high": 8_000,
        "very high": 16_000,
    }

    @staticmethod
    def get_budget(
        level_of_detail: Literal["low", "medium", "high", "very high"]
    ) -> int:
        """Return the response budget for the given level of detail.

        :param level_of_detail: one of ``"low"``, ``"medium"``, ``"high"``,
            ``"very high"``.
        :raises ValueError: if ``level_of_detail`` is not a known level.
        """
        try:
            return DetailMapper._BUDGETS[level_of_detail]
        except KeyError:
            raise ValueError(
                "Invalid level of detail. Must be one of: 'low', 'medium', 'high', 'very high'"
            ) from None

    @staticmethod
    def get_level_of_detail(
        response_budget: int,
    ) -> Literal["low", "medium", "high", "very high"]:
        """Return the level of detail whose bucket contains ``response_budget``.

        Buckets: ``< 4_000`` -> "low", ``< 8_000`` -> "medium",
        ``< 16_000`` -> "high", otherwise "very high".
        """
        if response_budget < 4_000:
            return "low"
        if response_budget < 8_000:
            return "medium"
        if response_budget < 16_000:
            return "high"
        return "very high"
@@ -1,7 +1,12 @@
1
1
  import pandas as pd
2
- from typing import Optional
2
+ from typing import Literal, Optional
3
3
 
4
+ from rapidata.rapidata_client.logging import logger
5
+ from rapidata.rapidata_client.benchmark._detail_mapper import DetailMapper
4
6
  from rapidata.service.openapi_service import OpenAPIService
7
+ from rapidata.api_client.models.update_leaderboard_response_config_model import (
8
+ UpdateLeaderboardResponseConfigModel,
9
+ )
5
10
 
6
11
 
7
12
  class RapidataLeaderboard:
@@ -17,68 +22,79 @@ class RapidataLeaderboard:
17
22
  id: The ID of the leaderboard.
18
23
  openapi_service: The OpenAPIService instance for API interaction.
19
24
  """
20
- def __init__(self,
21
- name: str,
22
- instruction: str,
23
- show_prompt: bool,
24
- show_prompt_asset: bool,
25
- inverse_ranking: bool,
26
- min_responses: int,
27
- response_budget: int,
28
- id: str,
29
- openapi_service: OpenAPIService):
25
+
26
+ def __init__(
27
+ self,
28
+ name: str,
29
+ instruction: str,
30
+ show_prompt: bool,
31
+ show_prompt_asset: bool,
32
+ inverse_ranking: bool,
33
+ response_budget: int,
34
+ id: str,
35
+ openapi_service: OpenAPIService,
36
+ ):
30
37
  self.__openapi_service = openapi_service
31
38
  self.__name = name
32
39
  self.__instruction = instruction
33
40
  self.__show_prompt = show_prompt
34
41
  self.__show_prompt_asset = show_prompt_asset
35
42
  self.__inverse_ranking = inverse_ranking
36
- self.__min_responses = min_responses
37
43
  self.__response_budget = response_budget
38
44
  self.id = id
39
45
 
40
46
  @property
41
- def response_budget(self) -> int:
47
+ def level_of_detail(self) -> Literal["low", "medium", "high", "very high"]:
42
48
  """
43
- Returns the response budget of the leaderboard.
49
+ Returns the level of detail of the leaderboard.
44
50
  """
45
- return self.__response_budget
46
-
47
- @property
48
- def min_responses(self) -> int:
51
+ return DetailMapper.get_level_of_detail(self.__response_budget)
52
+
53
+ @level_of_detail.setter
54
+ def level_of_detail(
55
+ self, level_of_detail: Literal["low", "medium", "high", "very high"]
56
+ ):
49
57
  """
50
- Returns the minimum number of responses required to be considered for the leaderboard.
58
+ Sets the level of detail of the leaderboard.
51
59
  """
52
- return self.__min_responses
53
-
60
+ logger.debug(f"Setting level of detail to {level_of_detail}")
61
+ self.__openapi_service.leaderboard_api.leaderboard_leaderboard_id_response_config_put(
62
+ leaderboard_id=self.id,
63
+ update_leaderboard_response_config_model=UpdateLeaderboardResponseConfigModel(
64
+ responseBudget=DetailMapper.get_budget(level_of_detail),
65
+ minResponses=DetailMapper.MIN_RESPONSES,
66
+ ),
67
+ )
68
+ self.__response_budget = DetailMapper.get_budget(level_of_detail)
69
+
54
70
  @property
55
71
  def show_prompt_asset(self) -> bool:
56
72
  """
57
73
  Returns whether the prompt asset is shown to the users.
58
74
  """
59
75
  return self.__show_prompt_asset
60
-
76
+
61
77
  @property
62
78
  def inverse_ranking(self) -> bool:
63
79
  """
64
80
  Returns whether the ranking is inverse.
65
81
  """
66
82
  return self.__inverse_ranking
67
-
83
+
68
84
  @property
69
85
  def show_prompt(self) -> bool:
70
86
  """
71
87
  Returns whether the prompt is shown to the users.
72
88
  """
73
89
  return self.__show_prompt
74
-
90
+
75
91
  @property
76
92
  def instruction(self) -> str:
77
93
  """
78
94
  Returns the instruction of the leaderboard.
79
95
  """
80
96
  return self.__instruction
81
-
97
+
82
98
  @property
83
99
  def name(self) -> str:
84
100
  """
@@ -100,28 +116,28 @@ class RapidataLeaderboard:
100
116
  """
101
117
 
102
118
  participants = self.__openapi_service.leaderboard_api.leaderboard_leaderboard_id_standings_get(
103
- leaderboard_id=self.id,
104
- tags=tags
119
+ leaderboard_id=self.id, tags=tags
105
120
  )
106
121
 
107
122
  standings = []
108
123
  for participant in participants.items:
109
- standings.append({
110
- "name": participant.name,
111
- "wins": participant.wins,
112
- "total_matches": participant.total_matches,
113
- "score": round(participant.score, 2) if participant.score is not None else None,
114
- })
124
+ standings.append(
125
+ {
126
+ "name": participant.name,
127
+ "wins": participant.wins,
128
+ "total_matches": participant.total_matches,
129
+ "score": (
130
+ round(participant.score, 2)
131
+ if participant.score is not None
132
+ else None
133
+ ),
134
+ }
135
+ )
115
136
 
116
137
  return pd.DataFrame(standings)
117
138
 
118
139
  def __str__(self) -> str:
119
140
  return f"RapidataLeaderboard(name={self.name}, instruction={self.instruction}, show_prompt={self.show_prompt}, leaderboard_id={self.id})"
120
-
141
+
121
142
  def __repr__(self) -> str:
122
143
  return self.__str__()
123
-
124
-
125
-
126
-
127
-
@@ -1,24 +1,34 @@
1
1
  import re
2
- from typing import Optional
2
+ from typing import Literal, Optional
3
3
  from rapidata.api_client.models.root_filter import RootFilter
4
4
  from rapidata.api_client.models.filter import Filter
5
5
  from rapidata.api_client.models.query_model import QueryModel
6
6
  from rapidata.api_client.models.page_info import PageInfo
7
7
  from rapidata.api_client.models.create_leaderboard_model import CreateLeaderboardModel
8
- from rapidata.api_client.models.create_benchmark_participant_model import CreateBenchmarkParticipantModel
8
+ from rapidata.api_client.models.create_benchmark_participant_model import (
9
+ CreateBenchmarkParticipantModel,
10
+ )
9
11
  from rapidata.api_client.models.submit_prompt_model import SubmitPromptModel
10
- from rapidata.api_client.models.submit_prompt_model_prompt_asset import SubmitPromptModelPromptAsset
12
+ from rapidata.api_client.models.submit_prompt_model_prompt_asset import (
13
+ SubmitPromptModelPromptAsset,
14
+ )
11
15
  from rapidata.api_client.models.url_asset_input import UrlAssetInput
12
16
  from rapidata.api_client.models.file_asset_model import FileAssetModel
13
17
  from rapidata.api_client.models.source_url_metadata_model import SourceUrlMetadataModel
14
18
 
15
19
 
16
- from rapidata.rapidata_client.benchmark.participant._participant import BenchmarkParticipant
20
+ from rapidata.rapidata_client.benchmark.participant._participant import (
21
+ BenchmarkParticipant,
22
+ )
17
23
  from rapidata.rapidata_client.logging import logger
18
24
  from rapidata.service.openapi_service import OpenAPIService
19
25
 
20
- from rapidata.rapidata_client.benchmark.leaderboard.rapidata_leaderboard import RapidataLeaderboard
26
+ from rapidata.rapidata_client.benchmark.leaderboard.rapidata_leaderboard import (
27
+ RapidataLeaderboard,
28
+ )
21
29
  from rapidata.rapidata_client.datapoints.assets import MediaAsset
30
+ from rapidata.rapidata_client.benchmark._detail_mapper import DetailMapper
31
+
22
32
 
23
33
  class RapidataBenchmark:
24
34
  """
@@ -31,6 +41,7 @@ class RapidataBenchmark:
31
41
  id: The id of the benchmark.
32
42
  openapi_service: The OpenAPI service to use to interact with the Rapidata API.
33
43
  """
44
+
34
45
  def __init__(self, name: str, id: str, openapi_service: OpenAPIService):
35
46
  self.name = name
36
47
  self.id = id
@@ -40,51 +51,54 @@ class RapidataBenchmark:
40
51
  self.__leaderboards: list[RapidataLeaderboard] = []
41
52
  self.__identifiers: list[str] = []
42
53
  self.__tags: list[list[str]] = []
43
-
54
+
44
55
  def __instantiate_prompts(self) -> None:
45
56
  current_page = 1
46
57
  total_pages = None
47
-
58
+
48
59
  while True:
49
- prompts_result = self.__openapi_service.benchmark_api.benchmark_benchmark_id_prompts_get(
50
- benchmark_id=self.id,
51
- request=QueryModel(
52
- page=PageInfo(
53
- index=current_page,
54
- size=100
55
- )
60
+ prompts_result = (
61
+ self.__openapi_service.benchmark_api.benchmark_benchmark_id_prompts_get(
62
+ benchmark_id=self.id,
63
+ request=QueryModel(page=PageInfo(index=current_page, size=100)),
56
64
  )
57
65
  )
58
-
66
+
59
67
  if prompts_result.total_pages is None:
60
- raise ValueError("An error occurred while fetching prompts: total_pages is None")
61
-
68
+ raise ValueError(
69
+ "An error occurred while fetching prompts: total_pages is None"
70
+ )
71
+
62
72
  total_pages = prompts_result.total_pages
63
-
73
+
64
74
  for prompt in prompts_result.items:
65
75
  self.__prompts.append(prompt.prompt)
66
76
  self.__identifiers.append(prompt.identifier)
67
77
  if prompt.prompt_asset is None:
68
78
  self.__prompt_assets.append(None)
69
79
  else:
70
- assert isinstance(prompt.prompt_asset.actual_instance, FileAssetModel)
71
- source_url = prompt.prompt_asset.actual_instance.metadata["sourceUrl"].actual_instance
80
+ assert isinstance(
81
+ prompt.prompt_asset.actual_instance, FileAssetModel
82
+ )
83
+ source_url = prompt.prompt_asset.actual_instance.metadata[
84
+ "sourceUrl"
85
+ ].actual_instance
72
86
  assert isinstance(source_url, SourceUrlMetadataModel)
73
87
  self.__prompt_assets.append(source_url.url)
74
88
 
75
89
  self.__tags.append(prompt.tags)
76
90
  if current_page >= total_pages:
77
91
  break
78
-
92
+
79
93
  current_page += 1
80
94
 
81
95
  @property
82
96
  def identifiers(self) -> list[str]:
83
97
  if not self.__identifiers:
84
98
  self.__instantiate_prompts()
85
-
99
+
86
100
  return self.__identifiers
87
-
101
+
88
102
  @property
89
103
  def prompts(self) -> list[str | None]:
90
104
  """
@@ -92,9 +106,9 @@ class RapidataBenchmark:
92
106
  """
93
107
  if not self.__prompts:
94
108
  self.__instantiate_prompts()
95
-
109
+
96
110
  return self.__prompts
97
-
111
+
98
112
  @property
99
113
  def prompt_assets(self) -> list[str | None]:
100
114
  """
@@ -102,9 +116,9 @@ class RapidataBenchmark:
102
116
  """
103
117
  if not self.__prompt_assets:
104
118
  self.__instantiate_prompts()
105
-
119
+
106
120
  return self.__prompt_assets
107
-
121
+
108
122
  @property
109
123
  def tags(self) -> list[list[str]]:
110
124
  """
@@ -112,9 +126,10 @@ class RapidataBenchmark:
112
126
  """
113
127
  if not self.__tags:
114
128
  self.__instantiate_prompts()
115
-
129
+
116
130
  return self.__tags
117
-
131
+
132
+ @property
118
133
  def leaderboards(self) -> list[RapidataLeaderboard]:
119
134
  """
120
135
  Returns the leaderboards that are registered for the benchmark.
@@ -122,48 +137,62 @@ class RapidataBenchmark:
122
137
  if not self.__leaderboards:
123
138
  current_page = 1
124
139
  total_pages = None
125
-
140
+
126
141
  while True:
127
- leaderboards_result = self.__openapi_service.leaderboard_api.leaderboards_get(
128
- request=QueryModel(
129
- filter=RootFilter(
130
- filters=[
131
- Filter(field="BenchmarkId", operator="Eq", value=self.id)
142
+ leaderboards_result = (
143
+ self.__openapi_service.leaderboard_api.leaderboards_get(
144
+ request=QueryModel(
145
+ filter=RootFilter(
146
+ filters=[
147
+ Filter(
148
+ field="BenchmarkId",
149
+ operator="Eq",
150
+ value=self.id,
151
+ )
132
152
  ]
133
153
  ),
134
- page=PageInfo(
135
- index=current_page,
136
- size=100
154
+ page=PageInfo(index=current_page, size=100),
137
155
  )
138
156
  )
139
157
  )
140
-
158
+
141
159
  if leaderboards_result.total_pages is None:
142
- raise ValueError("An error occurred while fetching leaderboards: total_pages is None")
143
-
160
+ raise ValueError(
161
+ "An error occurred while fetching leaderboards: total_pages is None"
162
+ )
163
+
144
164
  total_pages = leaderboards_result.total_pages
145
-
146
- self.__leaderboards.extend([
147
- RapidataLeaderboard(
148
- leaderboard.name,
149
- leaderboard.instruction,
150
- leaderboard.show_prompt,
151
- leaderboard.show_prompt_asset,
152
- leaderboard.is_inversed,
153
- leaderboard.min_responses,
154
- leaderboard.response_budget,
155
- leaderboard.id,
156
- self.__openapi_service
157
- ) for leaderboard in leaderboards_result.items])
158
-
165
+
166
+ self.__leaderboards.extend(
167
+ [
168
+ RapidataLeaderboard(
169
+ leaderboard.name,
170
+ leaderboard.instruction,
171
+ leaderboard.show_prompt,
172
+ leaderboard.show_prompt_asset,
173
+ leaderboard.is_inversed,
174
+ leaderboard.response_budget,
175
+ leaderboard.id,
176
+ self.__openapi_service,
177
+ )
178
+ for leaderboard in leaderboards_result.items
179
+ ]
180
+ )
181
+
159
182
  if current_page >= total_pages:
160
183
  break
161
-
184
+
162
185
  current_page += 1
163
-
186
+
164
187
  return self.__leaderboards
165
-
166
- def add_prompt(self, identifier: str, prompt: str | None = None, asset: str | None = None, tags: Optional[list[str]] = None):
188
+
189
+ def add_prompt(
190
+ self,
191
+ identifier: str,
192
+ prompt: str | None = None,
193
+ asset: str | None = None,
194
+ tags: Optional[list[str]] = None,
195
+ ):
167
196
  """
168
197
  Adds a prompt to the benchmark.
169
198
 
@@ -178,25 +207,27 @@ class RapidataBenchmark:
178
207
 
179
208
  if not isinstance(identifier, str):
180
209
  raise ValueError("Identifier must be a string.")
181
-
210
+
182
211
  if prompt is None and asset is None:
183
212
  raise ValueError("Prompt or asset must be provided.")
184
-
213
+
185
214
  if prompt is not None and not isinstance(prompt, str):
186
215
  raise ValueError("Prompt must be a string.")
187
-
216
+
188
217
  if asset is not None and not isinstance(asset, str):
189
218
  raise ValueError("Asset must be a string. That is the link to the asset.")
190
-
219
+
191
220
  if identifier in self.identifiers:
192
221
  raise ValueError("Identifier already exists in the benchmark.")
193
-
194
- if asset is not None and not re.match(r'^https?://', asset):
222
+
223
+ if asset is not None and not re.match(r"^https?://", asset):
195
224
  raise ValueError("Asset must be a link to the asset.")
196
-
197
- if tags is not None and (not isinstance(tags, list) or not all(isinstance(tag, str) for tag in tags)):
225
+
226
+ if tags is not None and (
227
+ not isinstance(tags, list) or not all(isinstance(tag, str) for tag in tags)
228
+ ):
198
229
  raise ValueError("Tags must be a list of strings.")
199
-
230
+
200
231
  self.__identifiers.append(identifier)
201
232
 
202
233
  self.__tags.append(tags)
@@ -208,25 +239,25 @@ class RapidataBenchmark:
208
239
  submit_prompt_model=SubmitPromptModel(
209
240
  identifier=identifier,
210
241
  prompt=prompt,
211
- promptAsset=SubmitPromptModelPromptAsset(
212
- UrlAssetInput(
213
- _t="UrlAssetInput",
214
- url=asset
242
+ promptAsset=(
243
+ SubmitPromptModelPromptAsset(
244
+ UrlAssetInput(_t="UrlAssetInput", url=asset)
215
245
  )
216
- ) if asset is not None else None,
217
- tags=tags
218
- )
246
+ if asset is not None
247
+ else None
248
+ ),
249
+ tags=tags,
250
+ ),
219
251
  )
220
252
 
221
253
  def create_leaderboard(
222
- self,
223
- name: str,
224
- instruction: str,
254
+ self,
255
+ name: str,
256
+ instruction: str,
225
257
  show_prompt: bool = False,
226
258
  show_prompt_asset: bool = False,
227
259
  inverse_ranking: bool = False,
228
- min_responses: int | None = None,
229
- response_budget: int | None = None
260
+ level_of_detail: Literal["low", "medium", "high", "very high"] = "low",
230
261
  ) -> RapidataLeaderboard:
231
262
  """
232
263
  Creates a new leaderboard for the benchmark.
@@ -237,13 +268,8 @@ class RapidataBenchmark:
237
268
  show_prompt: Whether to show the prompt to the users. (default: False)
238
269
  show_prompt_asset: Whether to show the prompt asset to the users. (only works if the prompt asset is a URL) (default: False)
239
270
  inverse_ranking: Whether to inverse the ranking of the leaderboard. (if the question is inversed, e.g. "Which video is worse?")
240
- min_responses: The minimum amount of responses that get collected per comparison. if None, it will be defaulted.
241
- response_budget: The total amount of responses that get collected per new model evaluation. if None, it will be defaulted. Values below 2000 are not recommended.
271
+ level_of_detail: The level of detail of the leaderboard. This will effect how many comparisons are done per model evaluation. (default: "low")
242
272
  """
243
-
244
- if response_budget is not None and response_budget < 2000:
245
- logger.warning("Response budget is below 2000. This is not recommended.")
246
-
247
273
  leaderboard_result = self.__openapi_service.leaderboard_api.leaderboard_post(
248
274
  create_leaderboard_model=CreateLeaderboardModel(
249
275
  benchmarkId=self.id,
@@ -252,12 +278,14 @@ class RapidataBenchmark:
252
278
  showPrompt=show_prompt,
253
279
  showPromptAsset=show_prompt_asset,
254
280
  isInversed=inverse_ranking,
255
- minResponses=min_responses,
256
- responseBudget=response_budget
281
+ minResponses=DetailMapper.MIN_RESPONSES,
282
+ responseBudget=DetailMapper.get_budget(level_of_detail),
257
283
  )
258
284
  )
259
285
 
260
- assert leaderboard_result.benchmark_id == self.id, "The leaderboard was not created for the correct benchmark."
286
+ assert (
287
+ leaderboard_result.benchmark_id == self.id
288
+ ), "The leaderboard was not created for the correct benchmark."
261
289
 
262
290
  return RapidataLeaderboard(
263
291
  name,
@@ -265,13 +293,14 @@ class RapidataBenchmark:
265
293
  show_prompt,
266
294
  show_prompt_asset,
267
295
  inverse_ranking,
268
- leaderboard_result.min_responses,
269
296
  leaderboard_result.response_budget,
270
297
  leaderboard_result.id,
271
- self.__openapi_service
298
+ self.__openapi_service,
272
299
  )
273
300
 
274
- def evaluate_model(self, name: str, media: list[str], identifiers: list[str]) -> None:
301
+ def evaluate_model(
302
+ self, name: str, media: list[str], identifiers: list[str]
303
+ ) -> None:
275
304
  """
276
305
  Evaluates a model on the benchmark across all leaderboards.
277
306
 
@@ -283,14 +312,16 @@ class RapidataBenchmark:
283
312
  """
284
313
  if not media:
285
314
  raise ValueError("Media must be a non-empty list of strings")
286
-
315
+
287
316
  if len(media) != len(identifiers):
288
317
  raise ValueError("Media and identifiers must have the same length")
289
-
318
+
290
319
  if not all(identifier in self.identifiers for identifier in identifiers):
291
- raise ValueError("All identifiers must be in the registered identifiers list. To see the registered identifiers, use the identifiers property.\
292
- \nTo see the prompts that are associated with the identifiers, use the prompts property.")
293
-
320
+ raise ValueError(
321
+ "All identifiers must be in the registered identifiers list. To see the registered identifiers, use the identifiers property.\
322
+ \nTo see the prompts that are associated with the identifiers, use the prompts property."
323
+ )
324
+
294
325
  # happens before the creation of the participant to ensure all media paths are valid
295
326
  assets: list[MediaAsset] = []
296
327
  for media_path in media:
@@ -300,12 +331,14 @@ class RapidataBenchmark:
300
331
  benchmark_id=self.id,
301
332
  create_benchmark_participant_model=CreateBenchmarkParticipantModel(
302
333
  name=name,
303
- )
334
+ ),
304
335
  )
305
336
 
306
337
  logger.info(f"Participant created: {participant_result.participant_id}")
307
338
 
308
- participant = BenchmarkParticipant(name, participant_result.participant_id, self.__openapi_service)
339
+ participant = BenchmarkParticipant(
340
+ name, participant_result.participant_id, self.__openapi_service
341
+ )
309
342
 
310
343
  successful_uploads, failed_uploads = participant.upload_media(
311
344
  assets,
@@ -313,15 +346,25 @@ class RapidataBenchmark:
313
346
  )
314
347
 
315
348
  total_uploads = len(assets)
316
- success_rate = (len(successful_uploads) / total_uploads * 100) if total_uploads > 0 else 0
317
- logger.info(f"Upload complete: {len(successful_uploads)} successful, {len(failed_uploads)} failed ({success_rate:.1f}% success rate)")
349
+ success_rate = (
350
+ (len(successful_uploads) / total_uploads * 100) if total_uploads > 0 else 0
351
+ )
352
+ logger.info(
353
+ f"Upload complete: {len(successful_uploads)} successful, {len(failed_uploads)} failed ({success_rate:.1f}% success rate)"
354
+ )
318
355
 
319
356
  if failed_uploads:
320
- logger.error(f"Failed uploads for media: {[asset.path for asset in failed_uploads]}")
321
- logger.warning("Some uploads failed. The model evaluation may be incomplete.")
357
+ logger.error(
358
+ f"Failed uploads for media: {[asset.path for asset in failed_uploads]}"
359
+ )
360
+ logger.warning(
361
+ "Some uploads failed. The model evaluation may be incomplete."
362
+ )
322
363
 
323
364
  if len(successful_uploads) == 0:
324
- raise RuntimeError("No uploads were successful. The model evaluation will not be completed.")
365
+ raise RuntimeError(
366
+ "No uploads were successful. The model evaluation will not be completed."
367
+ )
325
368
 
326
369
  self.__openapi_service.participant_api.participants_participant_id_submit_post(
327
370
  participant_id=participant_result.participant_id
@@ -329,6 +372,6 @@ class RapidataBenchmark:
329
372
 
330
373
  def __str__(self) -> str:
331
374
  return f"RapidataBenchmark(name={self.name}, id={self.id})"
332
-
375
+
333
376
  def __repr__(self) -> str:
334
377
  return self.__str__()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: rapidata
3
- Version: 2.34.1
3
+ Version: 2.35.0
4
4
  Summary: Rapidata package containing the Rapidata Python Client to interact with the Rapidata Web API in an easy way.
5
5
  License: Apache-2.0
6
6
  Author: Rapidata AG
@@ -1,5 +1,5 @@
1
- rapidata/__init__.py,sha256=2xTTP9jdfs_KhD1-Ksz4fi6hYKPzVX4MBOXSlZRsCk0,897
2
- rapidata/api_client/__init__.py,sha256=N4Cx20ScEXZ9A2pdsn8ZPUuhnpeXlGByav3P_1ofQ_8,34733
1
+ rapidata/__init__.py,sha256=LHrPkAuvFTYjxU8zeMmh15W2xmXsvyVcIK_bv8qScRg,897
2
+ rapidata/api_client/__init__.py,sha256=fb2lqv3sj48wAgarp3g6hvtTgd7bfI01DJJLuBZQnFI,34850
3
3
  rapidata/api_client/api/__init__.py,sha256=dGnSE9oPO_ahGh-E1jtw4_VuM_vQueQFuv0IVMQo6uo,1546
4
4
  rapidata/api_client/api/benchmark_api.py,sha256=bC8hAPgHIDU5u1e0loWPWnZX33BW6gsAR8oc5199q2k,129777
5
5
  rapidata/api_client/api/campaign_api.py,sha256=406gNDALFb0sJhfx727ZM5_0GDX4iB0w5ym2dExLm4g,49894
@@ -12,7 +12,7 @@ rapidata/api_client/api/dataset_api.py,sha256=DUCEfP7jlMAAMdvEa-47xq0mq3MGcyk4DA
12
12
  rapidata/api_client/api/evaluation_workflow_api.py,sha256=E0Phmx54jzXx7LZYGquTqzZSrX2aE5PS9rAs5HdDjvs,15151
13
13
  rapidata/api_client/api/feedback_api.py,sha256=-ZI2-1HtQ7wAzBKClgXMmMHtYdgoZtWrpQql3p51qp0,11589
14
14
  rapidata/api_client/api/identity_api.py,sha256=LmK6cTXssNjCa1BteOMc8P4FsyRiHQ_Kr30vmWIAYko,55093
15
- rapidata/api_client/api/leaderboard_api.py,sha256=M82K7VI72c72Ij1_Svz7qm_ttVDuAilSi0g9B4BuU4g,167604
15
+ rapidata/api_client/api/leaderboard_api.py,sha256=wGnMTeaYsyeWwvptdD5R3OJK42ZJWQhyJvwDmN3bqQc,179275
16
16
  rapidata/api_client/api/newsletter_api.py,sha256=3NU6HO5Gtm-RH-nx5hcp2CCE4IZmWHwTfCLMMz-Xpq4,22655
17
17
  rapidata/api_client/api/order_api.py,sha256=6hD7a_8LVGuGdT_k1lE-gQKCWcSAcFMJO5Nsdc8xgbM,214715
18
18
  rapidata/api_client/api/participant_api.py,sha256=eOoZDCRF61bmPAcoMWhOREgUp3rEYhrQLEPJq8o27b4,87709
@@ -29,7 +29,7 @@ rapidata/api_client/api_client.py,sha256=EDhxAOUc5JFWvFsF1zc726Q7GoEFkuB8uor5SlG
29
29
  rapidata/api_client/api_response.py,sha256=eMxw1mpmJcoGZ3gs9z6jM4oYoZ10Gjk333s9sKxGv7s,652
30
30
  rapidata/api_client/configuration.py,sha256=g472vHVPLBotq8EkfSXP4sbp7xnn-3sb8O8BBlRWK1I,15931
31
31
  rapidata/api_client/exceptions.py,sha256=eLLd1fxM0Ygf3IIG6aNx9hdy79drst5Cem0UjI_NamM,5978
32
- rapidata/api_client/models/__init__.py,sha256=1mQliyAedV6ftD2vpUt5Dnv_E8JyeCFkSMZB_YMutC4,32646
32
+ rapidata/api_client/models/__init__.py,sha256=f6lXxDLfQKlvw0DZddgyzBZn3jEFyt5OBIitESxPbtQ,32763
33
33
  rapidata/api_client/models/ab_test_selection.py,sha256=xQcE1BgKSnkTcmIuroeVOAQcAhGkHLlMP9XjakMFgDc,4327
34
34
  rapidata/api_client/models/ab_test_selection_a_inner.py,sha256=VsCi27NBGxAtupB_sQZCzUEsTNNgSGV_Mo-Fi0UY1Jw,11657
35
35
  rapidata/api_client/models/add_campaign_artifact_result.py,sha256=4IvFVS-tLlL6eHsWp-IZ_ul5T30-h3YEwd2B5ioBbgY,2582
@@ -212,8 +212,8 @@ rapidata/api_client/models/file_asset_model_metadata_value.py,sha256=KiA2BENesZt
212
212
  rapidata/api_client/models/file_type.py,sha256=t7qrK5oEaVWWg7pis5mO2BviaVNuAUh7O1SNib_kfGw,783
213
213
  rapidata/api_client/models/file_type_metadata.py,sha256=qvRJ0SWEy2mPfvjGAc1GV5AMRjOm4EMXRAQ-sTupv6Q,3412
214
214
  rapidata/api_client/models/file_type_metadata_model.py,sha256=1BHfr0wk3HQSNmMTDWdROPtLsPwE88Z2D1YaEnOte7E,3308
215
- rapidata/api_client/models/filter.py,sha256=-gje_jVjRXyeguRj9yGuHgyi53hn871Z_2ULs_5yVr8,4541
216
- rapidata/api_client/models/filter_operator.py,sha256=hVWuGOU6iseDxdJCG4E-b5Pbb8w3rCLT26ukvmwx6G0,889
215
+ rapidata/api_client/models/filter.py,sha256=cIvVdYntcGLQLgW0iNTqGdEj_jJQQBcMt9pF_l_FVug,4553
216
+ rapidata/api_client/models/filter_operator.py,sha256=zPAPopNy6Jlw-3Gz-tEsape0SyriGvfXREij57bNdP4,903
217
217
  rapidata/api_client/models/form_file_wrapper.py,sha256=1KSzAKKGE_-yXTyIyjHi5ctc_CrGCRfkX3EMqlW0IFA,4208
218
218
  rapidata/api_client/models/free_text_payload.py,sha256=u5p0ybMMOH2ecmzT3FauQs0LgQgLNnUOD8Kk1umOqtw,2952
219
219
  rapidata/api_client/models/free_text_rapid_blueprint.py,sha256=Tpf2durJj9olZbI-QPl89BiLESrOs9fDbATpch8tRj4,2988
@@ -425,7 +425,7 @@ rapidata/api_client/models/root_filter.py,sha256=oBtXjKE0i3m_HmD1XeHwaLCFFQRkpkW
425
425
  rapidata/api_client/models/run_status.py,sha256=wtGbdMPDcpR35pMbczVABkYfERTWnocMon2s-uaaaUM,798
426
426
  rapidata/api_client/models/runs_by_leaderboard_result.py,sha256=5sRSV7d9rxjBbdTnWk10ke1GW4jVKZ8O5iSMclthryk,3581
427
427
  rapidata/api_client/models/runs_by_leaderboard_result_paged_result.py,sha256=Ts5gkkQLKIbISLay6xlVYdu0vyXxAwtY7mwQMKobC-Y,3559
428
- rapidata/api_client/models/sample_by_participant.py,sha256=N1DHLru8cPit7-sT_8a1Ein47slvtH6UsJwPUYmZjgs,4437
428
+ rapidata/api_client/models/sample_by_participant.py,sha256=-CxSOHphruBeeSTblX4yrERL4o7PFx4ofnfyl9JMcko,4508
429
429
  rapidata/api_client/models/sample_by_participant_paged_result.py,sha256=FKobGmMEymLQwRbwdS5R-nox_AtFAxg9CuVopWNQgTk,3526
430
430
  rapidata/api_client/models/scrub_payload.py,sha256=tX-QU_a8GUQWBPb1GofGLFupucZF5TY2LUpqdyfHDSI,2920
431
431
  rapidata/api_client/models/scrub_range.py,sha256=2P__eZ4HeAxWcjFkp-p938Ih8GHf0rJea18sIGxUN0A,2527
@@ -494,6 +494,7 @@ rapidata/api_client/models/update_campaign_model.py,sha256=9mCp3fQZqtecsCQS1SBbf
494
494
  rapidata/api_client/models/update_dataset_name_model.py,sha256=IeGRrxo6G2fKfMwnoBFYj4BW9ksGo_8db21VcLYfLvc,2599
495
495
  rapidata/api_client/models/update_dimensions_model.py,sha256=jDg2114Y14AxcQHg_C6oTxMZ17Cq4vEQRtzeNjN70dk,2591
496
496
  rapidata/api_client/models/update_leaderboard_name_model.py,sha256=QmEy8MNkrQYyJtXR91MKN1j511YFIIW-ovN3MxvFoWw,2614
497
+ rapidata/api_client/models/update_leaderboard_response_config_model.py,sha256=Jkp3zS358CD1EPhknNp1voUOAtuQFPBstlw1SMJc6JI,2978
497
498
  rapidata/api_client/models/update_order_model.py,sha256=RUlxnzLqO6o-w5EEPb8wv1ANRKpkSbs8PhGM42T35uw,2570
498
499
  rapidata/api_client/models/update_order_name_model.py,sha256=Cm8qZUJKgx1JTgkhlJcVNdLwPnRV8gqeeo7G4bVDOS4,2582
499
500
  rapidata/api_client/models/update_participant_name_model.py,sha256=PPXeS5euTpMt7QrmLWordYU1tGS1gZ-zwjgQDglld_g,2614
@@ -536,16 +537,17 @@ rapidata/api_client/models/workflow_split_model_filter_configs_inner.py,sha256=1
536
537
  rapidata/api_client/models/workflow_state.py,sha256=5LAK1se76RCoozeVB6oxMPb8p_5bhLZJqn7q5fFQWis,850
537
538
  rapidata/api_client/models/zip_entry_file_wrapper.py,sha256=06CoNJD3x511K3rnSmkrwwhc9GbQxwxF-c0ldOyJbAs,4240
538
539
  rapidata/api_client/rest.py,sha256=rtIMcgINZOUaDFaJIinJkXRSddNJmXvMRMfgO2Ezk2o,10835
539
- rapidata/api_client_README.md,sha256=hFJwQC9pI9JtopNmka-9TUPcRqhY3zhhrZj0ebJizVs,62286
540
+ rapidata/api_client_README.md,sha256=5k-14YuFPSGEpYGJsIL0xwyEWeIvuWd3VOI7jFYw_dc,62649
540
541
  rapidata/rapidata_client/__init__.py,sha256=CfkQxCdURXzJsVP6sxKmufze2u-IE_snG_G8NEkE_JM,1225
541
542
  rapidata/rapidata_client/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
542
543
  rapidata/rapidata_client/api/rapidata_exception.py,sha256=BIdmHRrJUGW-Mqhp1H_suemZaR6w9TgjWq-ZW5iUPdQ,3878
543
544
  rapidata/rapidata_client/benchmark/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
545
+ rapidata/rapidata_client/benchmark/_detail_mapper.py,sha256=IcCW0d3_vwAdyG7V9P5UMPASoblk6JX5UEhrfHcaAIs,967
544
546
  rapidata/rapidata_client/benchmark/leaderboard/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
545
- rapidata/rapidata_client/benchmark/leaderboard/rapidata_leaderboard.py,sha256=PKqcW_pQJLmNg_feLDCtF-9SufMw2_EU4yKFo6QxxR0,3950
547
+ rapidata/rapidata_client/benchmark/leaderboard/rapidata_leaderboard.py,sha256=ucbE6wSNaT8Li_XTxoze1iEYppaHw8OTcqlP-PW5KMw,4767
546
548
  rapidata/rapidata_client/benchmark/participant/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
547
549
  rapidata/rapidata_client/benchmark/participant/_participant.py,sha256=yN82EWrZXYszsM8Ns0HRMXCTivltkyxcpGRK-cdT01Y,3683
548
- rapidata/rapidata_client/benchmark/rapidata_benchmark.py,sha256=J84_FHDefNom8MvAhyB8MPuSIuMgRPSYy81yTwlbb8Y,14117
550
+ rapidata/rapidata_client/benchmark/rapidata_benchmark.py,sha256=wZgN79jV4WF6HtNYl8gqTHArCEi8WHy3zN1xnxzTS6A,14268
549
551
  rapidata/rapidata_client/benchmark/rapidata_benchmark_manager.py,sha256=NnXCsU2_8SlGxdI51MeIUWK7Ku0pEylDSPhnzOPzTV0,5291
550
552
  rapidata/rapidata_client/country_codes/__init__.py,sha256=FB9Dcks44J6C6YBSYmTmNZ71tE130x6NO_3aLJ8fKzQ,40
551
553
  rapidata/rapidata_client/country_codes/country_codes.py,sha256=ePHqeb7y9DWQZAnddBzPx1puYBcrgUjdR2sbFijuFD8,283
@@ -649,7 +651,7 @@ rapidata/service/__init__.py,sha256=s9bS1AJZaWIhLtJX_ZA40_CK39rAAkwdAmymTMbeWl4,
649
651
  rapidata/service/credential_manager.py,sha256=pUEEtp6VrFWYhfUUtyqmS0AlRqe2Y0kFkY6o22IT4KM,8682
650
652
  rapidata/service/local_file_service.py,sha256=pgorvlWcx52Uh3cEG6VrdMK_t__7dacQ_5AnfY14BW8,877
651
653
  rapidata/service/openapi_service.py,sha256=v2fhPbHmD0J11ZRZY6f80PdIdGwpRFlbfMH9t8Ypg5A,5403
652
- rapidata-2.34.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
653
- rapidata-2.34.1.dist-info/METADATA,sha256=dGCNxI6ZrfsjRqw9pltK0B4ShiQcSyAXZZjWZipsXEI,1264
654
- rapidata-2.34.1.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
655
- rapidata-2.34.1.dist-info/RECORD,,
654
+ rapidata-2.35.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
655
+ rapidata-2.35.0.dist-info/METADATA,sha256=lGStAMXDWn_ztSIkIrN6bpJJ2HiZE7ivo1Z0LQ_eRAo,1264
656
+ rapidata-2.35.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
657
+ rapidata-2.35.0.dist-info/RECORD,,