rapidata 2.29.1__py3-none-any.whl → 2.30.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of rapidata might be problematic.

```diff
@@ -0,0 +1,39 @@
+# coding: utf-8
+
+"""
+    Rapidata.Dataset
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: v1
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import json
+from enum import Enum
+from typing_extensions import Self
+
+
+class RunStatus(str, Enum):
+    """
+    RunStatus
+    """
+
+    """
+    allowed enum values
+    """
+    QUEUED = 'Queued'
+    RUNNING = 'Running'
+    COMPLETED = 'Completed'
+    FAILED = 'Failed'
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Self:
+        """Create an instance of RunStatus from a JSON string"""
+        return cls(json.loads(json_str))
+
+
```
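The new `RunStatus` enum deserializes from a JSON string via `from_json`, and because it subclasses `str`, its members compare equal to the raw strings. A minimal sketch; the import path is an assumption based on the generator's usual `rapidata.api_client.models` layout:

```python
# Import path assumed from the generator's standard module layout.
from rapidata.api_client.models.run_status import RunStatus

status = RunStatus.from_json('"Running"')  # json.loads('"Running"') -> 'Running' -> member lookup
assert status is RunStatus.RUNNING
assert status == "Running"  # str subclass: compares equal to the plain value
```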
```diff
@@ -0,0 +1,110 @@
+# coding: utf-8
+
+"""
+    Rapidata.Dataset
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: v1
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from datetime import datetime
+from pydantic import BaseModel, ConfigDict, Field, StrictStr, field_validator
+from typing import Any, ClassVar, Dict, List, Optional
+from typing import Optional, Set
+from typing_extensions import Self
+
+class RunsByLeaderboardResult(BaseModel):
+    """
+    RunsByLeaderboardResult
+    """ # noqa: E501
+    id: StrictStr
+    name: StrictStr
+    status: StrictStr
+    created_at: datetime = Field(alias="createdAt")
+    owner_mail: StrictStr = Field(alias="ownerMail")
+    order_id: Optional[StrictStr] = Field(default=None, alias="orderId")
+    __properties: ClassVar[List[str]] = ["id", "name", "status", "createdAt", "ownerMail", "orderId"]
+
+    @field_validator('status')
+    def status_validate_enum(cls, value):
+        """Validates the enum"""
+        if value not in set(['Queued', 'Running', 'Completed', 'Failed']):
+            raise ValueError("must be one of enum values ('Queued', 'Running', 'Completed', 'Failed')")
+        return value
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of RunsByLeaderboardResult from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # set to None if order_id (nullable) is None
+        # and model_fields_set contains the field
+        if self.order_id is None and "order_id" in self.model_fields_set:
+            _dict['orderId'] = None
+
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of RunsByLeaderboardResult from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({
+            "id": obj.get("id"),
+            "name": obj.get("name"),
+            "status": obj.get("status"),
+            "createdAt": obj.get("createdAt"),
+            "ownerMail": obj.get("ownerMail"),
+            "orderId": obj.get("orderId")
+        })
+        return _obj
+
+
```
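A quick round trip through the new model, showing the `status` enum validation and the omission of unset nullable fields; the payload values below are invented (the import path appears verbatim in the paged-result file that follows):

```python
from rapidata.api_client.models.runs_by_leaderboard_result import RunsByLeaderboardResult

payload = """{
    "id": "run-1",
    "name": "example-model-v2",
    "status": "Queued",
    "createdAt": "2024-01-01T00:00:00Z",
    "ownerMail": "user@example.com"
}"""
run = RunsByLeaderboardResult.from_json(payload)
assert run is not None and run.status == "Queued"  # any value outside the four enum strings raises ValueError
assert "orderId" not in run.to_dict()              # nullable orderId was never set, so it is omitted
```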
```diff
@@ -0,0 +1,105 @@
+# coding: utf-8
+
+"""
+    Rapidata.Dataset
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: v1
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictInt
+from typing import Any, ClassVar, Dict, List, Optional
+from rapidata.api_client.models.runs_by_leaderboard_result import RunsByLeaderboardResult
+from typing import Optional, Set
+from typing_extensions import Self
+
+class RunsByLeaderboardResultPagedResult(BaseModel):
+    """
+    RunsByLeaderboardResultPagedResult
+    """ # noqa: E501
+    total: StrictInt
+    page: StrictInt
+    page_size: StrictInt = Field(alias="pageSize")
+    items: List[RunsByLeaderboardResult]
+    total_pages: Optional[StrictInt] = Field(default=None, alias="totalPages")
+    __properties: ClassVar[List[str]] = ["total", "page", "pageSize", "items", "totalPages"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of RunsByLeaderboardResultPagedResult from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        * OpenAPI `readOnly` fields are excluded.
+        """
+        excluded_fields: Set[str] = set([
+            "total_pages",
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # override the default output from pydantic by calling `to_dict()` of each item in items (list)
+        _items = []
+        if self.items:
+            for _item_items in self.items:
+                if _item_items:
+                    _items.append(_item_items.to_dict())
+            _dict['items'] = _items
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of RunsByLeaderboardResultPagedResult from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({
+            "total": obj.get("total"),
+            "page": obj.get("page"),
+            "pageSize": obj.get("pageSize"),
+            "items": [RunsByLeaderboardResult.from_dict(_item) for _item in obj["items"]] if obj.get("items") is not None else None,
+            "totalPages": obj.get("totalPages")
+        })
+        return _obj
+
+
```
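The paged wrapper nests `RunsByLeaderboardResult` items and drops the read-only `totalPages` field on serialization. A sketch with invented data; the module path is assumed from the generator's naming conventions:

```python
from rapidata.api_client.models.runs_by_leaderboard_result_paged_result import (
    RunsByLeaderboardResultPagedResult,  # module path assumed from generator conventions
)

page = RunsByLeaderboardResultPagedResult.from_json(
    '{"total": 1, "page": 1, "pageSize": 50, "totalPages": 1,'
    ' "items": [{"id": "run-1", "name": "example-model-v2", "status": "Completed",'
    ' "createdAt": "2024-01-01T00:00:00Z", "ownerMail": "user@example.com"}]}'
)
assert page is not None
for run in page.items:                     # each item is a full RunsByLeaderboardResult
    print(run.name, run.status)
assert "totalPages" not in page.to_dict()  # readOnly field is excluded on serialization
```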
```diff
@@ -0,0 +1,87 @@
+# coding: utf-8
+
+"""
+    Rapidata.Dataset
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: v1
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictStr
+from typing import Any, ClassVar, Dict, List
+from typing import Optional, Set
+from typing_extensions import Self
+
+class UpdateBenchmarkNameModel(BaseModel):
+    """
+    The model used to update the name of a benchmark.
+    """ # noqa: E501
+    name: StrictStr = Field(description="The new name of the benchmark.")
+    __properties: ClassVar[List[str]] = ["name"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of UpdateBenchmarkNameModel from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of UpdateBenchmarkNameModel from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({
+            "name": obj.get("name")
+        })
+        return _obj
+
+
```
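The update-name models are single-field request bodies, so construction and serialization are trivial; a sketch with an illustrative name (import path assumed from the generator layout):

```python
from rapidata.api_client.models.update_benchmark_name_model import UpdateBenchmarkNameModel

body = UpdateBenchmarkNameModel(name="nightly-image-benchmark")  # illustrative name
print(body.to_json())  # {"name": "nightly-image-benchmark"}
```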
```diff
@@ -0,0 +1,87 @@
+# coding: utf-8
+
+"""
+    Rapidata.Dataset
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: v1
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictStr
+from typing import Any, ClassVar, Dict, List
+from typing import Optional, Set
+from typing_extensions import Self
+
+class UpdateLeaderboardNameModel(BaseModel):
+    """
+    The model used to update the name of a leaderboard.
+    """ # noqa: E501
+    name: StrictStr = Field(description="The new name of the leaderboard.")
+    __properties: ClassVar[List[str]] = ["name"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of UpdateLeaderboardNameModel from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of UpdateLeaderboardNameModel from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({
+            "name": obj.get("name")
+        })
+        return _obj
+
+
```
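`UpdateLeaderboardNameModel` behaves identically; note that the generated `from_dict` tolerates a missing payload by returning `None`:

```python
from rapidata.api_client.models.update_leaderboard_name_model import UpdateLeaderboardNameModel

body = UpdateLeaderboardNameModel.from_dict({"name": "spring-eval"})  # illustrative name
assert body is not None and body.name == "spring-eval"
assert UpdateLeaderboardNameModel.from_dict(None) is None  # absent payloads map to None
```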
```diff
@@ -74,6 +74,7 @@ Class | Method | HTTP request | Description
 ------------ | ------------- | ------------- | -------------
 *BenchmarkApi* | [**benchmark_benchmark_id_delete**](rapidata/api_client/docs/BenchmarkApi.md#benchmark_benchmark_id_delete) | **DELETE** /benchmark/{benchmarkId} | Deletes a single benchmark.
 *BenchmarkApi* | [**benchmark_benchmark_id_get**](rapidata/api_client/docs/BenchmarkApi.md#benchmark_benchmark_id_get) | **GET** /benchmark/{benchmarkId} | Returns a single benchmark by its ID.
+*BenchmarkApi* | [**benchmark_benchmark_id_name_put**](rapidata/api_client/docs/BenchmarkApi.md#benchmark_benchmark_id_name_put) | **PUT** /benchmark/{benchmarkId}/name | Updates the name of a benchmark.
 *BenchmarkApi* | [**benchmark_benchmark_id_participant_participant_id_delete**](rapidata/api_client/docs/BenchmarkApi.md#benchmark_benchmark_id_participant_participant_id_delete) | **DELETE** /benchmark/{benchmarkId}/participant/{participantId} | Deletes a participant on a benchmark.
 *BenchmarkApi* | [**benchmark_benchmark_id_participant_participant_id_get**](rapidata/api_client/docs/BenchmarkApi.md#benchmark_benchmark_id_participant_participant_id_get) | **GET** /benchmark/{benchmarkId}/participant/{participantId} | Gets a participant by its Id.
 *BenchmarkApi* | [**benchmark_benchmark_id_participants_get**](rapidata/api_client/docs/BenchmarkApi.md#benchmark_benchmark_id_participants_get) | **GET** /benchmark/{benchmarkId}/participants | Query all participants within a benchmark
@@ -127,6 +128,7 @@ Class | Method | HTTP request | Description
 *LeaderboardApi* | [**leaderboard_leaderboard_id_boost_post**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_boost_post) | **POST** /leaderboard/{leaderboardId}/boost | Boosts a subset of participants within a leaderboard.
 *LeaderboardApi* | [**leaderboard_leaderboard_id_delete**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_delete) | **DELETE** /leaderboard/{leaderboardId} | Deletes a leaderboard by its ID.
 *LeaderboardApi* | [**leaderboard_leaderboard_id_get**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_get) | **GET** /leaderboard/{leaderboardId} | Gets a leaderboard by its ID.
+*LeaderboardApi* | [**leaderboard_leaderboard_id_name_put**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_name_put) | **PUT** /leaderboard/{leaderboardId}/name | Updates the name of a leaderboard.
 *LeaderboardApi* | [**leaderboard_leaderboard_id_participant_participant_id_get**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_participant_participant_id_get) | **GET** /leaderboard/{leaderboardId}/participant/{participantId} | Gets a participant by its ID.
 *LeaderboardApi* | [**leaderboard_leaderboard_id_participants_get**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_participants_get) | **GET** /leaderboard/{leaderboardId}/participants | queries all the participants connected to leaderboard by its ID.
 *LeaderboardApi* | [**leaderboard_leaderboard_id_participants_participant_id_submit_post**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_participants_participant_id_submit_post) | **POST** /leaderboard/{leaderboardId}/participants/{participantId}/submit | Submits a participant to a leaderboard.
@@ -134,6 +136,7 @@ Class | Method | HTTP request | Description
 *LeaderboardApi* | [**leaderboard_leaderboard_id_prompts_get**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_prompts_get) | **GET** /leaderboard/{leaderboardId}/prompts | returns the paged prompts of a leaderboard by its ID.
 *LeaderboardApi* | [**leaderboard_leaderboard_id_prompts_post**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_prompts_post) | **POST** /leaderboard/{leaderboardId}/prompts | adds a new prompt to a leaderboard.
 *LeaderboardApi* | [**leaderboard_leaderboard_id_refresh_post**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_refresh_post) | **POST** /leaderboard/{leaderboardId}/refresh | This will force an update to all standings of a leaderboard. this could happen if the recorded matches and scores are out of sync
+*LeaderboardApi* | [**leaderboard_leaderboard_id_runs_get**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_runs_get) | **GET** /leaderboard/{leaderboardId}/runs | Gets the runs related to a leaderboard
 *LeaderboardApi* | [**leaderboard_leaderboard_id_standings_get**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_leaderboard_id_standings_get) | **GET** /leaderboard/{leaderboardId}/standings | queries all the participants connected to leaderboard by its ID.
 *LeaderboardApi* | [**leaderboard_post**](rapidata/api_client/docs/LeaderboardApi.md#leaderboard_post) | **POST** /leaderboard | Creates a new leaderboard with the specified name and criteria.
 *LeaderboardApi* | [**leaderboards_get**](rapidata/api_client/docs/LeaderboardApi.md#leaderboards_get) | **GET** /leaderboards | Queries all leaderboards of the user.
@@ -484,6 +487,9 @@ Class | Method | HTTP request | Description
 - [ResponseCountUserFilterModel](rapidata/api_client/docs/ResponseCountUserFilterModel.md)
 - [RetrievalMode](rapidata/api_client/docs/RetrievalMode.md)
 - [RootFilter](rapidata/api_client/docs/RootFilter.md)
+- [RunStatus](rapidata/api_client/docs/RunStatus.md)
+- [RunsByLeaderboardResult](rapidata/api_client/docs/RunsByLeaderboardResult.md)
+- [RunsByLeaderboardResultPagedResult](rapidata/api_client/docs/RunsByLeaderboardResultPagedResult.md)
 - [ScrubPayload](rapidata/api_client/docs/ScrubPayload.md)
 - [ScrubRange](rapidata/api_client/docs/ScrubRange.md)
 - [ScrubRapidBlueprint](rapidata/api_client/docs/ScrubRapidBlueprint.md)
@@ -533,9 +539,11 @@ Class | Method | HTTP request | Description
 - [TranslatedPromptMetadataModel](rapidata/api_client/docs/TranslatedPromptMetadataModel.md)
 - [TranslatedString](rapidata/api_client/docs/TranslatedString.md)
 - [UnlockOrderResult](rapidata/api_client/docs/UnlockOrderResult.md)
+- [UpdateBenchmarkNameModel](rapidata/api_client/docs/UpdateBenchmarkNameModel.md)
 - [UpdateCampaignModel](rapidata/api_client/docs/UpdateCampaignModel.md)
 - [UpdateDatasetNameModel](rapidata/api_client/docs/UpdateDatasetNameModel.md)
 - [UpdateDimensionsModel](rapidata/api_client/docs/UpdateDimensionsModel.md)
+- [UpdateLeaderboardNameModel](rapidata/api_client/docs/UpdateLeaderboardNameModel.md)
 - [UpdateOrderNameModel](rapidata/api_client/docs/UpdateOrderNameModel.md)
 - [UpdateValidationRapidModel](rapidata/api_client/docs/UpdateValidationRapidModel.md)
 - [UpdateValidationRapidModelTruth](rapidata/api_client/docs/UpdateValidationRapidModelTruth.md)
```
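Putting the README additions together: the two rename endpoints take the new request models, and the runs endpoint returns the paged result. A hedged sketch of a direct call through the generated client; the method names come from the tables above, while the keyword-argument names and the `api`/`ApiClient` module layout are assumptions based on openapi-generator's usual Python output:

```python
from rapidata.api_client import ApiClient, Configuration            # layout assumed
from rapidata.api_client.api.benchmark_api import BenchmarkApi      # layout assumed
from rapidata.api_client.api.leaderboard_api import LeaderboardApi  # layout assumed
from rapidata.api_client.models.update_benchmark_name_model import UpdateBenchmarkNameModel

client = ApiClient(Configuration(host="https://api.example.com"))   # placeholder host
BenchmarkApi(client).benchmark_benchmark_id_name_put(
    benchmark_id="65f0c0ffee",                                      # hypothetical ID
    update_benchmark_name_model=UpdateBenchmarkNameModel(name="renamed benchmark"),
)
runs_page = LeaderboardApi(client).leaderboard_leaderboard_id_runs_get(
    leaderboard_id="65f0c0ffef",                                    # hypothetical ID
)
print(runs_page.total, [run.status for run in runs_page.items])
```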
```diff
@@ -24,6 +24,7 @@ class RapidataLeaderboard:
         name: str,
         instruction: str,
         show_prompt: bool,
+        show_prompt_asset: bool,
         inverse_ranking: bool,
         min_responses: int,
         response_budget: int,
@@ -33,6 +34,7 @@ class RapidataLeaderboard:
         self.__name = name
         self.__instruction = instruction
         self.__show_prompt = show_prompt
+        self.__show_prompt_asset = show_prompt_asset
         self.__inverse_ranking = inverse_ranking
         self.__min_responses = min_responses
         self.__response_budget = response_budget
@@ -52,6 +54,13 @@ class RapidataLeaderboard:
         """
         return self.__min_responses
 
+    @property
+    def show_prompt_asset(self) -> bool:
+        """
+        Returns whether the prompt asset is shown to the users.
+        """
+        return self.__show_prompt_asset
+
     @property
     def inverse_ranking(self) -> bool:
         """
```
```diff
@@ -1,3 +1,4 @@
+import re
 from rapidata.api_client.models.root_filter import RootFilter
 from rapidata.api_client.models.filter import Filter
 from rapidata.api_client.models.query_model import QueryModel
@@ -5,6 +6,10 @@ from rapidata.api_client.models.page_info import PageInfo
 from rapidata.api_client.models.create_leaderboard_model import CreateLeaderboardModel
 from rapidata.api_client.models.create_benchmark_participant_model import CreateBenchmarkParticipantModel
 from rapidata.api_client.models.submit_prompt_model import SubmitPromptModel
+from rapidata.api_client.models.submit_prompt_model_prompt_asset import SubmitPromptModelPromptAsset
+from rapidata.api_client.models.url_asset_input import UrlAssetInput
+from rapidata.api_client.models.file_asset_model import FileAssetModel
+from rapidata.api_client.models.source_url_metadata_model import SourceUrlMetadataModel
 
 from rapidata.rapidata_client.logging import logger
 from rapidata.service.openapi_service import OpenAPIService
@@ -29,7 +34,8 @@ class RapidataBenchmark:
         self.name = name
         self.id = id
         self.__openapi_service = openapi_service
-        self.__prompts: list[str] = []
+        self.__prompts: list[str | None] = []
+        self.__prompt_assets: list[str | None] = []
         self.__leaderboards: list[RapidataLeaderboard] = []
         self.__identifiers: list[str] = []
 
@@ -53,8 +59,16 @@
 
             total_pages = prompts_result.total_pages
 
-            self.__prompts.extend([prompt.prompt for prompt in prompts_result.items])
-            self.__identifiers.extend([prompt.identifier for prompt in prompts_result.items])
+            for prompt in prompts_result.items:
+                self.__prompts.append(prompt.prompt)
+                self.__identifiers.append(prompt.identifier)
+                if prompt.prompt_asset is None:
+                    self.__prompt_assets.append(None)
+                else:
+                    assert isinstance(prompt.prompt_asset.actual_instance, FileAssetModel)
+                    source_url = prompt.prompt_asset.actual_instance.metadata["sourceUrl"].actual_instance
+                    assert isinstance(source_url, SourceUrlMetadataModel)
+                    self.__prompt_assets.append(source_url.url)
 
             if current_page >= total_pages:
                 break
@@ -62,7 +76,14 @@
             current_page += 1
 
     @property
-    def prompts(self) -> list[str]:
+    def identifiers(self) -> list[str]:
+        if not self.__identifiers:
+            self.__instantiate_prompts()
+
+        return self.__identifiers
+
+    @property
+    def prompts(self) -> list[str | None]:
         """
         Returns the prompts that are registered for the leaderboard.
         """
@@ -72,11 +93,14 @@
         return self.__prompts
 
     @property
-    def identifiers(self) -> list[str]:
-        if not self.__identifiers:
+    def prompt_assets(self) -> list[str | None]:
+        """
+        Returns the prompt assets that are registered for the benchmark.
+        """
+        if not self.__prompt_assets:
             self.__instantiate_prompts()
 
-        return self.__identifiers
+        return self.__prompt_assets
 
     @property
     def leaderboards(self) -> list[RapidataLeaderboard]:
@@ -112,6 +136,7 @@
                 leaderboard.name,
                 leaderboard.instruction,
                 leaderboard.show_prompt,
+                leaderboard.show_prompt_asset,
                 leaderboard.is_inversed,
                 leaderboard.min_responses,
                 leaderboard.response_budget,
@@ -126,24 +151,49 @@
 
         return self.__leaderboards
 
-    def add_prompt(self, identifier: str, prompt: str):
+    def add_prompt(self, identifier: str, prompt: str | None = None, asset: str | None = None):
         """
         Adds a prompt to the benchmark.
+
+        Args:
+            identifier: The identifier of the prompt/asset that will be used to match up the media.
+            prompt: The prompt that will be used to evaluate the model.
+            asset: The asset that will be used to evaluate the model. Provided as a link to the asset.
         """
-        if not isinstance(identifier, str) or not isinstance(prompt, str):
-            raise ValueError("Identifier and prompt must be strings.")
+        if not isinstance(identifier, str):
+            raise ValueError("Identifier must be a string.")
+
+        if prompt is None and asset is None:
+            raise ValueError("Prompt or asset must be provided.")
+
+        if prompt is not None and not isinstance(prompt, str):
+            raise ValueError("Prompt must be a string.")
+
+        if asset is not None and not isinstance(asset, str):
+            raise ValueError("Asset must be a string. That is the link to the asset.")
 
         if identifier in self.identifiers:
            raise ValueError("Identifier already exists in the benchmark.")
 
+        if asset is not None and not re.match(r'^https?://', asset):
+            raise ValueError("Asset must be a link to the asset.")
+
         self.__identifiers.append(identifier)
+
         self.__prompts.append(prompt)
+        self.__prompt_assets.append(asset)
 
         self.__openapi_service.benchmark_api.benchmark_benchmark_id_prompt_post(
             benchmark_id=self.id,
             submit_prompt_model=SubmitPromptModel(
                 identifier=identifier,
                 prompt=prompt,
+                promptAsset=SubmitPromptModelPromptAsset(
+                    UrlAssetInput(
+                        _t="UrlAssetInput",
+                        url=asset
+                    )
+                ) if asset is not None else None
             )
         )
@@ -151,7 +201,8 @@
         self,
         name: str,
         instruction: str,
-        show_prompt: bool,
+        show_prompt: bool = False,
+        show_prompt_asset: bool = False,
         inverse_ranking: bool = False,
         min_responses: int | None = None,
         response_budget: int | None = None
@@ -162,7 +213,8 @@
         Args:
             name: The name of the leaderboard. (not shown to the users)
             instruction: The instruction decides how the models will be evaluated.
-            show_prompt: Whether to show the prompt to the users.
+            show_prompt: Whether to show the prompt to the users. (default: False)
+            show_prompt_asset: Whether to show the prompt asset to the users. (only works if the prompt asset is a URL) (default: False)
             inverse_ranking: Whether to inverse the ranking of the leaderboard. (if the question is inversed, e.g. "Which video is worse?")
             min_responses: The minimum amount of responses that get collected per comparison. if None, it will be defaulted.
             response_budget: The total amount of responses that get collected per new model evaluation. if None, it will be defaulted. Values below 2000 are not recommended.
@@ -177,6 +229,7 @@
                 name=name,
                 instruction=instruction,
                 showPrompt=show_prompt,
+                showPromptAsset=show_prompt_asset,
                 isInversed=inverse_ranking,
                 minResponses=min_responses,
                 responseBudget=response_budget
@@ -189,6 +242,7 @@
             name,
             instruction,
             show_prompt,
+            show_prompt_asset,
             inverse_ranking,
             leaderboard_result.min_responses,
             leaderboard_result.response_budget,
```
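Taken together, a prompt can now be registered by text, by URL asset, or both, and a leaderboard can opt into displaying the asset. A sketch against an existing benchmark; the enclosing method name `create_leaderboard` is cut off in the hunk above and assumed, and all identifiers and URLs are illustrative:

```python
# `benchmark` is assumed to be an existing RapidataBenchmark (fetched via the client).
benchmark.add_prompt(
    identifier="cat-001",
    asset="https://assets.example.com/prompts/cat-001.png",  # must match ^https?://
)
benchmark.add_prompt(identifier="cat-002", prompt="A cat riding a bicycle")

leaderboard = benchmark.create_leaderboard(  # method name assumed; parameters per the hunk above
    name="realism-eval",
    instruction="Which image matches the reference better?",
    show_prompt_asset=True,                  # new flag; only applies to URL prompt assets
)
```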