rapidata 2.40.6__py3-none-any.whl → 2.41.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of rapidata might be problematic.
- rapidata/__init__.py +2 -1
- rapidata/api_client/__init__.py +5 -3
- rapidata/api_client/api/benchmark_api.py +610 -7
- rapidata/api_client/api/campaign_api.py +283 -0
- rapidata/api_client/api/evaluation_workflow_api.py +18 -72
- rapidata/api_client/api/identity_api.py +0 -278
- rapidata/api_client/api/leaderboard_api.py +9 -9
- rapidata/api_client/api/validation_set_api.py +5 -15
- rapidata/api_client/models/__init__.py +5 -3
- rapidata/api_client/models/benchmark_query_result.py +3 -1
- rapidata/api_client/models/create_benchmark_participant_result.py +2 -2
- rapidata/api_client/models/create_benchmark_prompt_result.py +87 -0
- rapidata/api_client/models/get_benchmark_by_id_result.py +4 -2
- rapidata/api_client/models/get_evaluation_workflow_results_model.py +114 -0
- rapidata/api_client/models/get_participant_by_id_result.py +2 -4
- rapidata/api_client/models/get_standing_by_id_result.py +2 -9
- rapidata/api_client/models/labeling_selection.py +9 -2
- rapidata/api_client/models/participant_by_benchmark.py +1 -3
- rapidata/api_client/models/standing_by_benchmark.py +113 -0
- rapidata/api_client/models/standing_by_leaderboard.py +1 -3
- rapidata/api_client/models/standings_by_benchmark_result.py +95 -0
- rapidata/api_client/models/sticky_state.py +1 -0
- rapidata/api_client/models/update_benchmark_model.py +5 -3
- rapidata/api_client/models/update_priority_model.py +87 -0
- rapidata/api_client/models/validation_set_model.py +32 -2
- rapidata/api_client_README.md +9 -5
- rapidata/rapidata_client/__init__.py +1 -0
- rapidata/rapidata_client/api/rapidata_api_client.py +3 -3
- rapidata/rapidata_client/benchmark/rapidata_benchmark_manager.py +3 -3
- rapidata/rapidata_client/settings/__init__.py +1 -0
- rapidata/rapidata_client/settings/rapidata_settings.py +3 -0
- rapidata/rapidata_client/settings/swap_context_instruction.py +20 -0
- rapidata/rapidata_client/validation/rapids/rapids.py +13 -0
- {rapidata-2.40.6.dist-info → rapidata-2.41.0.dist-info}/METADATA +1 -1
- {rapidata-2.40.6.dist-info → rapidata-2.41.0.dist-info}/RECORD +37 -31
- {rapidata-2.40.6.dist-info → rapidata-2.41.0.dist-info}/LICENSE +0 -0
- {rapidata-2.40.6.dist-info → rapidata-2.41.0.dist-info}/WHEEL +0 -0
rapidata/api_client/models/get_evaluation_workflow_results_model.py (new file)

```diff
@@ -0,0 +1,114 @@
+# coding: utf-8
+
+"""
+    Rapidata.Dataset
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: v1
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field
+from typing import Any, ClassVar, Dict, List, Optional
+from rapidata.api_client.models.page_info import PageInfo
+from rapidata.api_client.models.rapid_state import RapidState
+from rapidata.api_client.models.sort_criterion import SortCriterion
+from typing import Optional, Set
+from typing_extensions import Self
+
+class GetEvaluationWorkflowResultsModel(BaseModel):
+    """
+    Model for getting the overview of a simple workflow result.
+    """ # noqa: E501
+    page: Optional[PageInfo] = None
+    states: Optional[List[RapidState]] = Field(default=None, description="An optional list of states to filter the rapids by.")
+    sort_criteria: Optional[List[SortCriterion]] = Field(default=None, description="A list of criteria to sort the results by.", alias="sortCriteria")
+    __properties: ClassVar[List[str]] = ["page", "states", "sortCriteria"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of GetEvaluationWorkflowResultsModel from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # override the default output from pydantic by calling `to_dict()` of page
+        if self.page:
+            _dict['page'] = self.page.to_dict()
+        # override the default output from pydantic by calling `to_dict()` of each item in sort_criteria (list)
+        _items = []
+        if self.sort_criteria:
+            for _item_sort_criteria in self.sort_criteria:
+                if _item_sort_criteria:
+                    _items.append(_item_sort_criteria.to_dict())
+            _dict['sortCriteria'] = _items
+        # set to None if states (nullable) is None
+        # and model_fields_set contains the field
+        if self.states is None and "states" in self.model_fields_set:
+            _dict['states'] = None
+
+        # set to None if sort_criteria (nullable) is None
+        # and model_fields_set contains the field
+        if self.sort_criteria is None and "sort_criteria" in self.model_fields_set:
+            _dict['sortCriteria'] = None
+
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of GetEvaluationWorkflowResultsModel from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({
+            "page": PageInfo.from_dict(obj["page"]) if obj.get("page") is not None else None,
+            "states": obj.get("states"),
+            "sortCriteria": [SortCriterion.from_dict(_item) for _item in obj["sortCriteria"]] if obj.get("sortCriteria") is not None else None
+        })
+        return _obj
+
+
```
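The hunk above adds `GetEvaluationWorkflowResultsModel`, the filter/paging model used by the reworked `evaluation_workflow_api.py`. A minimal usage sketch (mine, not from the package docs; it relies only on the fields and helpers shown in the diff):

```python
from rapidata.api_client.models.get_evaluation_workflow_results_model import (
    GetEvaluationWorkflowResultsModel,
)

# All three fields are optional, so an empty filter is a valid request body.
filters = GetEvaluationWorkflowResultsModel()
print(filters.to_json())   # "{}" -- unset fields are omitted by to_dict()

# The wire format uses camelCase aliases ("sortCriteria"); the Python attribute is snake_case.
parsed = GetEvaluationWorkflowResultsModel.from_json('{"sortCriteria": []}')
print(parsed.sort_criteria)   # []
```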
rapidata/api_client/models/get_participant_by_id_result.py

```diff
@@ -29,8 +29,7 @@ class GetParticipantByIdResult(BaseModel):
     id: StrictStr
     name: StrictStr
     benchmark_id: StrictStr = Field(alias="benchmarkId")
-
-    __properties: ClassVar[List[str]] = ["id", "name", "benchmarkId", "datasetId"]
+    __properties: ClassVar[List[str]] = ["id", "name", "benchmarkId"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -85,8 +84,7 @@ class GetParticipantByIdResult(BaseModel):
         _obj = cls.model_validate({
             "id": obj.get("id"),
             "name": obj.get("name"),
-            "benchmarkId": obj.get("benchmarkId"),
-            "datasetId": obj.get("datasetId")
+            "benchmarkId": obj.get("benchmarkId")
         })
         return _obj
 
```
rapidata/api_client/models/get_standing_by_id_result.py

```diff
@@ -18,7 +18,7 @@ import re # noqa: F401
 import json
 
 from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
-from typing import Any, ClassVar, Dict, List
+from typing import Any, ClassVar, Dict, List
 from rapidata.api_client.models.standing_status import StandingStatus
 from typing import Optional, Set
 from typing_extensions import Self
@@ -30,10 +30,9 @@ class GetStandingByIdResult(BaseModel):
     id: StrictStr
     name: StrictStr
     benchmark_id: StrictStr = Field(alias="benchmarkId")
-    dataset_id: Optional[StrictStr] = Field(default=None, alias="datasetId")
     status: StandingStatus
     is_disabled: StrictBool = Field(alias="isDisabled")
-    __properties: ClassVar[List[str]] = ["id", "name", "benchmarkId", "
+    __properties: ClassVar[List[str]] = ["id", "name", "benchmarkId", "status", "isDisabled"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -74,11 +73,6 @@ class GetStandingByIdResult(BaseModel):
             exclude=excluded_fields,
             exclude_none=True,
         )
-        # set to None if dataset_id (nullable) is None
-        # and model_fields_set contains the field
-        if self.dataset_id is None and "dataset_id" in self.model_fields_set:
-            _dict['datasetId'] = None
-
         return _dict
 
     @classmethod
@@ -94,7 +88,6 @@ class GetStandingByIdResult(BaseModel):
             "id": obj.get("id"),
             "name": obj.get("name"),
             "benchmarkId": obj.get("benchmarkId"),
-            "datasetId": obj.get("datasetId"),
             "status": obj.get("status"),
             "isDisabled": obj.get("isDisabled")
         })
```
rapidata/api_client/models/labeling_selection.py

```diff
@@ -28,10 +28,11 @@ class LabelingSelection(BaseModel):
     LabelingSelection
     """ # noqa: E501
     t: StrictStr = Field(description="Discriminator value for LabelingSelection", alias="_t")
-    amount: StrictInt
+    amount: Optional[StrictInt] = None
+    effort_budget: Optional[StrictInt] = Field(default=None, alias="effortBudget")
     retrieval_mode: Optional[RetrievalMode] = Field(default=None, alias="retrievalMode")
     max_iterations: Optional[StrictInt] = Field(default=None, alias="maxIterations")
-    __properties: ClassVar[List[str]] = ["_t", "amount", "retrievalMode", "maxIterations"]
+    __properties: ClassVar[List[str]] = ["_t", "amount", "effortBudget", "retrievalMode", "maxIterations"]
 
     @field_validator('t')
     def t_validate_enum(cls, value):
@@ -79,6 +80,11 @@ class LabelingSelection(BaseModel):
             exclude=excluded_fields,
             exclude_none=True,
         )
+        # set to None if amount (nullable) is None
+        # and model_fields_set contains the field
+        if self.amount is None and "amount" in self.model_fields_set:
+            _dict['amount'] = None
+
         # set to None if max_iterations (nullable) is None
         # and model_fields_set contains the field
         if self.max_iterations is None and "max_iterations" in self.model_fields_set:
@@ -98,6 +104,7 @@ class LabelingSelection(BaseModel):
         _obj = cls.model_validate({
             "_t": obj.get("_t") if obj.get("_t") is not None else 'LabelingSelection',
             "amount": obj.get("amount"),
+            "effortBudget": obj.get("effortBudget"),
             "retrievalMode": obj.get("retrievalMode"),
             "maxIterations": obj.get("maxIterations")
         })
```
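The `LabelingSelection` change makes `amount` optional and introduces an `effortBudget` field. A small sketch of what that permits, assuming only the fields shown above (both numbers are placeholders):

```python
from rapidata.api_client.models.labeling_selection import LabelingSelection

# Pre-2.41.0 style: a fixed number of labels (still valid).
by_amount = LabelingSelection.from_dict({"_t": "LabelingSelection", "amount": 50})

# 2.41.0 style: no amount, an effort budget instead.
by_budget = LabelingSelection.from_dict({"_t": "LabelingSelection", "effortBudget": 200})
print(by_budget.effort_budget, by_budget.amount)   # 200 None
```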
rapidata/api_client/models/participant_by_benchmark.py

```diff
@@ -30,9 +30,8 @@ class ParticipantByBenchmark(BaseModel):
     id: StrictStr
     name: StrictStr
     benchmark_id: StrictStr = Field(alias="benchmarkId")
-    dataset_id: StrictStr = Field(alias="datasetId")
     status: ParticipantStatus
-    __properties: ClassVar[List[str]] = ["id", "name", "benchmarkId", "
+    __properties: ClassVar[List[str]] = ["id", "name", "benchmarkId", "status"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -88,7 +87,6 @@ class ParticipantByBenchmark(BaseModel):
             "id": obj.get("id"),
             "name": obj.get("name"),
             "benchmarkId": obj.get("benchmarkId"),
-            "datasetId": obj.get("datasetId"),
             "status": obj.get("status")
         })
         return _obj
```
rapidata/api_client/models/standing_by_benchmark.py (new file)

```diff
@@ -0,0 +1,113 @@
+# coding: utf-8
+
+"""
+    Rapidata.Dataset
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: v1
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictFloat, StrictInt, StrictStr
+from typing import Any, ClassVar, Dict, List, Optional, Union
+from rapidata.api_client.models.confidence_interval import ConfidenceInterval
+from rapidata.api_client.models.standing_status import StandingStatus
+from typing import Optional, Set
+from typing_extensions import Self
+
+class StandingByBenchmark(BaseModel):
+    """
+    StandingByBenchmark
+    """ # noqa: E501
+    id: StrictStr
+    name: StrictStr
+    benchmark_id: StrictStr = Field(alias="benchmarkId")
+    status: StandingStatus
+    score: Optional[Union[StrictFloat, StrictInt]] = None
+    wins: Union[StrictFloat, StrictInt]
+    total_matches: Union[StrictFloat, StrictInt] = Field(alias="totalMatches")
+    is_disabled: StrictBool = Field(alias="isDisabled")
+    confidence_interval: Optional[ConfidenceInterval] = Field(default=None, alias="confidenceInterval")
+    __properties: ClassVar[List[str]] = ["id", "name", "benchmarkId", "status", "score", "wins", "totalMatches", "isDisabled", "confidenceInterval"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of StandingByBenchmark from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # override the default output from pydantic by calling `to_dict()` of confidence_interval
+        if self.confidence_interval:
+            _dict['confidenceInterval'] = self.confidence_interval.to_dict()
+        # set to None if score (nullable) is None
+        # and model_fields_set contains the field
+        if self.score is None and "score" in self.model_fields_set:
+            _dict['score'] = None
+
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of StandingByBenchmark from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({
+            "id": obj.get("id"),
+            "name": obj.get("name"),
+            "benchmarkId": obj.get("benchmarkId"),
+            "status": obj.get("status"),
+            "score": obj.get("score"),
+            "wins": obj.get("wins"),
+            "totalMatches": obj.get("totalMatches"),
+            "isDisabled": obj.get("isDisabled"),
+            "confidenceInterval": ConfidenceInterval.from_dict(obj["confidenceInterval"]) if obj.get("confidenceInterval") is not None else None
+        })
+        return _obj
+
+
```
rapidata/api_client/models/standing_by_leaderboard.py

```diff
@@ -31,14 +31,13 @@ class StandingByLeaderboard(BaseModel):
     id: StrictStr
     name: StrictStr
     leaderboard_id: StrictStr = Field(alias="leaderboardId")
-    dataset_id: Optional[StrictStr] = Field(default=None, alias="datasetId")
     status: StandingStatus
     score: Optional[Union[StrictFloat, StrictInt]] = None
     wins: Union[StrictFloat, StrictInt]
     total_matches: Union[StrictFloat, StrictInt] = Field(alias="totalMatches")
     is_disabled: StrictBool = Field(alias="isDisabled")
     confidence_interval: Optional[ConfidenceInterval] = Field(default=None, alias="confidenceInterval")
-    __properties: ClassVar[List[str]] = ["id", "name", "leaderboardId", "
+    __properties: ClassVar[List[str]] = ["id", "name", "leaderboardId", "status", "score", "wins", "totalMatches", "isDisabled", "confidenceInterval"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -102,7 +101,6 @@ class StandingByLeaderboard(BaseModel):
             "id": obj.get("id"),
             "name": obj.get("name"),
             "leaderboardId": obj.get("leaderboardId"),
-            "datasetId": obj.get("datasetId"),
             "status": obj.get("status"),
             "score": obj.get("score"),
             "wins": obj.get("wins"),
```
rapidata/api_client/models/standings_by_benchmark_result.py (new file)

```diff
@@ -0,0 +1,95 @@
+# coding: utf-8
+
+"""
+    Rapidata.Dataset
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: v1
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict
+from typing import Any, ClassVar, Dict, List
+from rapidata.api_client.models.standing_by_benchmark import StandingByBenchmark
+from typing import Optional, Set
+from typing_extensions import Self
+
+class StandingsByBenchmarkResult(BaseModel):
+    """
+    StandingsByBenchmarkResult
+    """ # noqa: E501
+    items: List[StandingByBenchmark]
+    __properties: ClassVar[List[str]] = ["items"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of StandingsByBenchmarkResult from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        # override the default output from pydantic by calling `to_dict()` of each item in items (list)
+        _items = []
+        if self.items:
+            for _item_items in self.items:
+                if _item_items:
+                    _items.append(_item_items.to_dict())
+            _dict['items'] = _items
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of StandingsByBenchmarkResult from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({
+            "items": [StandingByBenchmark.from_dict(_item) for _item in obj["items"]] if obj.get("items") is not None else None
+        })
+        return _obj
+
+
```
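`StandingByBenchmark` and `StandingsByBenchmarkResult` mirror the existing leaderboard standings models for benchmarks. A consumption sketch (mine; the empty `items` payload is a placeholder so the snippet stays self-contained):

```python
from rapidata.api_client.models.standings_by_benchmark_result import StandingsByBenchmarkResult

standings = StandingsByBenchmarkResult.from_json('{"items": []}')

# Each item is a StandingByBenchmark with score/wins/totalMatches/confidenceInterval;
# rank the enabled entries by score (entries without a score sort last).
ranked = sorted(
    (s for s in standings.items if not s.is_disabled),
    key=lambda s: (s.score is None, -(s.score or 0.0)),
)
for s in ranked:
    print(s.name, s.score, f"{s.wins}/{s.total_matches}")
```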
rapidata/api_client/models/update_benchmark_model.py

```diff
@@ -17,7 +17,7 @@ import pprint
 import re # noqa: F401
 import json
 
-from pydantic import BaseModel, ConfigDict, StrictStr
+from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
 from typing import Any, ClassVar, Dict, List, Optional
 from typing import Optional, Set
 from typing_extensions import Self
@@ -27,7 +27,8 @@ class UpdateBenchmarkModel(BaseModel):
     The model used to update a benchmark.
     """ # noqa: E501
     name: Optional[StrictStr] = None
-
+    is_public: Optional[StrictBool] = Field(default=None, alias="isPublic")
+    __properties: ClassVar[List[str]] = ["name", "isPublic"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -80,7 +81,8 @@ class UpdateBenchmarkModel(BaseModel):
             return cls.model_validate(obj)
 
         _obj = cls.model_validate({
-            "name": obj.get("name")
+            "name": obj.get("name"),
+            "isPublic": obj.get("isPublic")
         })
         return _obj
 
```
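`UpdateBenchmarkModel` gains an optional `isPublic` flag alongside `name`. A small sketch of the resulting request body (the benchmark name is a placeholder):

```python
from rapidata.api_client.models.update_benchmark_model import UpdateBenchmarkModel

update = UpdateBenchmarkModel(name="my-benchmark", isPublic=True)
print(update.to_json())   # {"name": "my-benchmark", "isPublic": true}
```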
rapidata/api_client/models/update_priority_model.py (new file)

```diff
@@ -0,0 +1,87 @@
+# coding: utf-8
+
+"""
+    Rapidata.Dataset
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: v1
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictInt
+from typing import Any, ClassVar, Dict, List
+from typing import Optional, Set
+from typing_extensions import Self
+
+class UpdatePriorityModel(BaseModel):
+    """
+    The model to update the campaign priority.
+    """ # noqa: E501
+    priority: StrictInt = Field(description="The new priority value for the campaign.")
+    __properties: ClassVar[List[str]] = ["priority"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of UpdatePriorityModel from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of UpdatePriorityModel from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({
+            "priority": obj.get("priority")
+        })
+        return _obj
+
+
```
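`UpdatePriorityModel` is a one-field request body, presumably used by the new endpoints added in `campaign_api.py` listed above. A quick sketch (the priority value is arbitrary):

```python
from rapidata.api_client.models.update_priority_model import UpdatePriorityModel

body = UpdatePriorityModel(priority=10)
print(body.to_json())   # {"priority": 10}
assert UpdatePriorityModel.from_json('{"priority": 10}').priority == 10
```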
rapidata/api_client/models/validation_set_model.py

```diff
@@ -18,7 +18,7 @@ import re # noqa: F401
 import json
 
 from datetime import datetime
-from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
+from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr, field_validator
 from typing import Any, ClassVar, Dict, List
 from typing import Optional, Set
 from typing_extensions import Self
@@ -29,11 +29,38 @@ class ValidationSetModel(BaseModel):
     """ # noqa: E501
     id: StrictStr
     name: StrictStr
+    asset_type: List[StrictStr] = Field(alias="assetType")
+    modality: List[StrictStr]
+    prompt_type: List[StrictStr] = Field(alias="promptType")
     is_public: StrictBool = Field(alias="isPublic")
     owner_id: StrictStr = Field(alias="ownerId")
     owner_mail: StrictStr = Field(alias="ownerMail")
     created_at: datetime = Field(alias="createdAt")
-    __properties: ClassVar[List[str]] = ["id", "name", "isPublic", "ownerId", "ownerMail", "createdAt"]
+    __properties: ClassVar[List[str]] = ["id", "name", "assetType", "modality", "promptType", "isPublic", "ownerId", "ownerMail", "createdAt"]
+
+    @field_validator('asset_type')
+    def asset_type_validate_enum(cls, value):
+        """Validates the enum"""
+        for i in value:
+            if i not in set(['None', 'Image', 'Video', 'Audio', 'Text']):
+                raise ValueError("each list item must be one of ('None', 'Image', 'Video', 'Audio', 'Text')")
+        return value
+
+    @field_validator('modality')
+    def modality_validate_enum(cls, value):
+        """Validates the enum"""
+        for i in value:
+            if i not in set(['None', 'BoundingBox', 'Classify', 'Compare', 'FreeText', 'Line', 'Locate', 'NamedEntity', 'Polygon', 'Scrub', 'Transcription']):
+                raise ValueError("each list item must be one of ('None', 'BoundingBox', 'Classify', 'Compare', 'FreeText', 'Line', 'Locate', 'NamedEntity', 'Polygon', 'Scrub', 'Transcription')")
+        return value
+
+    @field_validator('prompt_type')
+    def prompt_type_validate_enum(cls, value):
+        """Validates the enum"""
+        for i in value:
+            if i not in set(['None', 'Text', 'Asset']):
+                raise ValueError("each list item must be one of ('None', 'Text', 'Asset')")
+        return value
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -88,6 +115,9 @@ class ValidationSetModel(BaseModel):
         _obj = cls.model_validate({
             "id": obj.get("id"),
             "name": obj.get("name"),
+            "assetType": obj.get("assetType"),
+            "modality": obj.get("modality"),
+            "promptType": obj.get("promptType"),
             "isPublic": obj.get("isPublic"),
             "ownerId": obj.get("ownerId"),
             "ownerMail": obj.get("ownerMail"),
```
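`ValidationSetModel` now carries `assetType`, `modality` and `promptType` lists, each guarded by a generated enum validator that rejects unknown entries at construction time. A sketch of that behaviour (every field value below is a made-up placeholder):

```python
from pydantic import ValidationError

from rapidata.api_client.models.validation_set_model import ValidationSetModel

payload = {
    "id": "vset-123",
    "name": "my-validation-set",
    "assetType": ["Image"],
    "modality": ["Compare"],
    "promptType": ["Text"],
    "isPublic": False,
    "ownerId": "user-123",
    "ownerMail": "owner@example.com",
    "createdAt": "2025-01-01T00:00:00Z",
}

vs = ValidationSetModel.from_dict(payload)
print(vs.asset_type, vs.modality, vs.prompt_type)   # ['Image'] ['Compare'] ['Text']

try:
    ValidationSetModel.from_dict({**payload, "assetType": ["Gif"]})
except ValidationError as e:
    print(e)   # message includes: each list item must be one of ('None', 'Image', 'Video', 'Audio', 'Text')
```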