rapidata 2.38.0__py3-none-any.whl → 2.39.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of rapidata might be problematic.
- rapidata/__init__.py +1 -1
- rapidata/api_client/__init__.py +4 -5
- rapidata/api_client/api/benchmark_api.py +289 -3
- rapidata/api_client/api/leaderboard_api.py +35 -1
- rapidata/api_client/api/participant_api.py +289 -3
- rapidata/api_client/api/validation_set_api.py +119 -400
- rapidata/api_client/models/__init__.py +4 -5
- rapidata/api_client/models/ab_test_selection_a_inner.py +1 -1
- rapidata/api_client/models/compare_workflow_model1.py +1 -8
- rapidata/api_client/models/conditional_validation_selection.py +4 -9
- rapidata/api_client/models/confidence_interval.py +98 -0
- rapidata/api_client/models/create_simple_pipeline_model_pipeline_steps_inner.py +8 -22
- rapidata/api_client/models/get_standing_by_id_result.py +7 -2
- rapidata/api_client/models/get_validation_set_by_id_result.py +4 -2
- rapidata/api_client/models/simple_workflow_model1.py +1 -8
- rapidata/api_client/models/standing_by_leaderboard.py +10 -4
- rapidata/api_client/models/update_benchmark_model.py +87 -0
- rapidata/api_client/models/update_participant_model.py +87 -0
- rapidata/api_client/models/update_validation_set_model.py +93 -0
- rapidata/api_client/models/validation_chance.py +20 -3
- rapidata/api_client/models/validation_set_model.py +5 -42
- rapidata/api_client_README.md +7 -7
- rapidata/rapidata_client/benchmark/rapidata_benchmark.py +53 -22
- rapidata/rapidata_client/benchmark/rapidata_benchmark_manager.py +60 -23
- rapidata/rapidata_client/config/logging_config.py +1 -1
- rapidata/rapidata_client/config/order_config.py +1 -1
- rapidata/rapidata_client/config/rapidata_config.py +1 -1
- rapidata/rapidata_client/config/upload_config.py +1 -1
- rapidata/rapidata_client/datapoints/assets/__init__.py +1 -0
- rapidata/rapidata_client/datapoints/assets/_base_asset.py +2 -0
- rapidata/rapidata_client/datapoints/assets/_text_asset.py +2 -2
- rapidata/rapidata_client/datapoints/assets/data_type_enum.py +1 -1
- rapidata/rapidata_client/datapoints/metadata/_media_asset_metadata.py +9 -8
- rapidata/rapidata_client/datapoints/metadata/_prompt_metadata.py +1 -2
- rapidata/rapidata_client/filter/models/age_group.py +4 -4
- rapidata/rapidata_client/filter/models/gender.py +4 -2
- rapidata/rapidata_client/filter/rapidata_filters.py +12 -9
- rapidata/rapidata_client/referee/__init__.py +1 -1
- rapidata/rapidata_client/referee/_base_referee.py +3 -1
- rapidata/rapidata_client/referee/_early_stopping_referee.py +2 -2
- rapidata/rapidata_client/selection/ab_test_selection.py +7 -3
- rapidata/rapidata_client/selection/capped_selection.py +2 -2
- rapidata/rapidata_client/selection/conditional_validation_selection.py +12 -6
- rapidata/rapidata_client/selection/demographic_selection.py +9 -6
- rapidata/rapidata_client/selection/rapidata_selections.py +11 -8
- rapidata/rapidata_client/selection/shuffling_selection.py +5 -5
- rapidata/rapidata_client/selection/validation_selection.py +9 -5
- rapidata/rapidata_client/settings/allow_neither_both.py +1 -0
- rapidata/rapidata_client/settings/custom_setting.py +3 -2
- rapidata/rapidata_client/settings/models/translation_behaviour_options.py +3 -2
- rapidata/rapidata_client/settings/no_shuffle.py +4 -2
- rapidata/rapidata_client/settings/play_video_until_the_end.py +7 -4
- rapidata/rapidata_client/settings/rapidata_settings.py +4 -3
- rapidata/rapidata_client/settings/translation_behaviour.py +7 -5
- rapidata/rapidata_client/validation/rapidata_validation_set.py +7 -5
- rapidata/rapidata_client/validation/rapids/box.py +3 -1
- rapidata/rapidata_client/validation/rapids/rapids_manager.py +174 -141
- rapidata/rapidata_client/workflow/__init__.py +1 -1
- rapidata/service/__init__.py +1 -1
- rapidata/service/local_file_service.py +9 -8
- {rapidata-2.38.0.dist-info → rapidata-2.39.0.dist-info}/METADATA +1 -1
- {rapidata-2.38.0.dist-info → rapidata-2.39.0.dist-info}/RECORD +64 -60
- {rapidata-2.38.0.dist-info → rapidata-2.39.0.dist-info}/LICENSE +0 -0
- {rapidata-2.38.0.dist-info → rapidata-2.39.0.dist-info}/WHEEL +0 -0
rapidata/api_client/models/update_validation_set_model.py
ADDED

@@ -0,0 +1,93 @@
+# coding: utf-8
+
+"""
+    Rapidata.Dataset
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+    The version of the OpenAPI document: v1
+    Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+    Do not edit the class manually.
+""" # noqa: E501
+
+
+from __future__ import annotations
+import pprint
+import re # noqa: F401
+import json
+
+from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
+from typing import Any, ClassVar, Dict, List, Optional
+from typing import Optional, Set
+from typing_extensions import Self
+
+class UpdateValidationSetModel(BaseModel):
+    """
+    Allows for specific updates to a validation set without needing to provide all properties.
+    """ # noqa: E501
+    name: Optional[StrictStr] = None
+    dimensions: Optional[List[StrictStr]] = None
+    should_alert: Optional[StrictBool] = Field(default=None, alias="shouldAlert")
+    is_public: Optional[StrictBool] = Field(default=None, alias="isPublic")
+    __properties: ClassVar[List[str]] = ["name", "dimensions", "shouldAlert", "isPublic"]
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        validate_assignment=True,
+        protected_namespaces=(),
+    )
+
+
+    def to_str(self) -> str:
+        """Returns the string representation of the model using alias"""
+        return pprint.pformat(self.model_dump(by_alias=True))
+
+    def to_json(self) -> str:
+        """Returns the JSON representation of the model using alias"""
+        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+        return json.dumps(self.to_dict())
+
+    @classmethod
+    def from_json(cls, json_str: str) -> Optional[Self]:
+        """Create an instance of UpdateValidationSetModel from a JSON string"""
+        return cls.from_dict(json.loads(json_str))
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Return the dictionary representation of the model using alias.
+
+        This has the following differences from calling pydantic's
+        `self.model_dump(by_alias=True)`:
+
+        * `None` is only added to the output dict for nullable fields that
+          were set at model initialization. Other fields with value `None`
+          are ignored.
+        """
+        excluded_fields: Set[str] = set([
+        ])
+
+        _dict = self.model_dump(
+            by_alias=True,
+            exclude=excluded_fields,
+            exclude_none=True,
+        )
+        return _dict
+
+    @classmethod
+    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+        """Create an instance of UpdateValidationSetModel from a dict"""
+        if obj is None:
+            return None
+
+        if not isinstance(obj, dict):
+            return cls.model_validate(obj)
+
+        _obj = cls.model_validate({
+            "name": obj.get("name"),
+            "dimensions": obj.get("dimensions"),
+            "shouldAlert": obj.get("shouldAlert"),
+            "isPublic": obj.get("isPublic")
+        })
+        return _obj
+
+
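The new UpdateValidationSetModel only serializes fields that were actually set, which is what makes it suitable for PATCH-style partial updates. A minimal sketch (example values are made up; the import path follows the generated client layout shown above):

from rapidata.api_client.models.update_validation_set_model import UpdateValidationSetModel

# Only name and isPublic are set; dimensions and shouldAlert stay None.
patch = UpdateValidationSetModel(name="compare-quality-v2", isPublic=True)

print(patch.to_dict())  # {'name': 'compare-quality-v2', 'isPublic': True} -- unset fields are omitted
print(patch.to_json())  # '{"name": "compare-quality-v2", "isPublic": true}'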
rapidata/api_client/models/validation_chance.py
CHANGED

@@ -18,7 +18,7 @@ import re # noqa: F401
 import json
 
 from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt
-from typing import Any, ClassVar, Dict, List, Union
+from typing import Any, ClassVar, Dict, List, Optional, Union
 from typing import Optional, Set
 from typing_extensions import Self
 
@@ -29,7 +29,8 @@ class ValidationChance(BaseModel):
     user_score_threshold: Union[StrictFloat, StrictInt] = Field(alias="userScoreThreshold")
     chance: Union[StrictFloat, StrictInt]
     rapid_count: StrictInt = Field(alias="rapidCount")
-    __properties: ClassVar[List[str]] = ["userScoreThreshold", "chance", "rapidCount"]
+    selections: Optional[List[AbTestSelectionAInner]] = None
+    __properties: ClassVar[List[str]] = ["userScoreThreshold", "chance", "rapidCount", "selections"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -70,6 +71,18 @@ class ValidationChance(BaseModel):
             exclude=excluded_fields,
             exclude_none=True,
         )
+        # override the default output from pydantic by calling `to_dict()` of each item in selections (list)
+        _items = []
+        if self.selections:
+            for _item_selections in self.selections:
+                if _item_selections:
+                    _items.append(_item_selections.to_dict())
+            _dict['selections'] = _items
+        # set to None if selections (nullable) is None
+        # and model_fields_set contains the field
+        if self.selections is None and "selections" in self.model_fields_set:
+            _dict['selections'] = None
+
         return _dict
 
     @classmethod
@@ -84,8 +97,12 @@ class ValidationChance(BaseModel):
         _obj = cls.model_validate({
            "userScoreThreshold": obj.get("userScoreThreshold"),
            "chance": obj.get("chance"),
-           "rapidCount": obj.get("rapidCount")
+           "rapidCount": obj.get("rapidCount"),
+           "selections": [AbTestSelectionAInner.from_dict(_item) for _item in obj["selections"]] if obj.get("selections") is not None else None
        })
        return _obj
 
+from rapidata.api_client.models.ab_test_selection_a_inner import AbTestSelectionAInner
+# TODO: Rewrite to not use raise_errors
+ValidationChance.model_rebuild(raise_errors=False)
 
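ValidationChance gains an optional selections list of AbTestSelectionAInner items. A short sketch of the regenerated serializer's behaviour, constructed with made-up values:

from rapidata.api_client.models.validation_chance import ValidationChance

# selections is optional and defaults to None when not supplied.
chance = ValidationChance(userScoreThreshold=0.8, chance=0.25, rapidCount=3)

# Because selections was never set, to_dict() leaves it out entirely.
print(chance.to_dict())  # {'userScoreThreshold': 0.8, 'chance': 0.25, 'rapidCount': 3}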
rapidata/api_client/models/validation_set_model.py
CHANGED

@@ -18,8 +18,8 @@ import re # noqa: F401
 import json
 
 from datetime import datetime
-from pydantic import BaseModel, ConfigDict, Field,
-from typing import Any, ClassVar, Dict, List
+from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictStr
+from typing import Any, ClassVar, Dict, List
 from typing import Optional, Set
 from typing_extensions import Self
 
@@ -29,46 +29,11 @@ class ValidationSetModel(BaseModel):
     """ # noqa: E501
     id: StrictStr
     name: StrictStr
-
-    modality: Optional[List[StrictStr]] = None
-    prompt_type: Optional[List[StrictStr]] = Field(default=None, alias="promptType")
+    is_public: StrictBool = Field(alias="isPublic")
     owner_id: StrictStr = Field(alias="ownerId")
     owner_mail: StrictStr = Field(alias="ownerMail")
     created_at: datetime = Field(alias="createdAt")
-    __properties: ClassVar[List[str]] = ["id", "name", "
-
-    @field_validator('asset_type')
-    def asset_type_validate_enum(cls, value):
-        """Validates the enum"""
-        if value is None:
-            return value
-
-        for i in value:
-            if i not in set(['None', 'Image', 'Video', 'Audio', 'Text']):
-                raise ValueError("each list item must be one of ('None', 'Image', 'Video', 'Audio', 'Text')")
-        return value
-
-    @field_validator('modality')
-    def modality_validate_enum(cls, value):
-        """Validates the enum"""
-        if value is None:
-            return value
-
-        for i in value:
-            if i not in set(['None', 'BoundingBox', 'Classify', 'Compare', 'FreeText', 'Line', 'Locate', 'NamedEntity', 'Polygon', 'Scrub', 'Transcription']):
-                raise ValueError("each list item must be one of ('None', 'BoundingBox', 'Classify', 'Compare', 'FreeText', 'Line', 'Locate', 'NamedEntity', 'Polygon', 'Scrub', 'Transcription')")
-        return value
-
-    @field_validator('prompt_type')
-    def prompt_type_validate_enum(cls, value):
-        """Validates the enum"""
-        if value is None:
-            return value
-
-        for i in value:
-            if i not in set(['None', 'Text', 'Asset']):
-                raise ValueError("each list item must be one of ('None', 'Text', 'Asset')")
-        return value
+    __properties: ClassVar[List[str]] = ["id", "name", "isPublic", "ownerId", "ownerMail", "createdAt"]
 
     model_config = ConfigDict(
         populate_by_name=True,
@@ -123,9 +88,7 @@ class ValidationSetModel(BaseModel):
         _obj = cls.model_validate({
            "id": obj.get("id"),
            "name": obj.get("name"),
-           "
-           "modality": obj.get("modality"),
-           "promptType": obj.get("promptType"),
+           "isPublic": obj.get("isPublic"),
            "ownerId": obj.get("ownerId"),
            "ownerMail": obj.get("ownerMail"),
            "createdAt": obj.get("createdAt")
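ValidationSetModel is now a much slimmer read model: the asset/modality/prompt-type enum fields and their validators are gone, and visibility is a single isPublic flag. A minimal sketch with placeholder values:

from rapidata.api_client.models.validation_set_model import ValidationSetModel

validation_set = ValidationSetModel.from_dict({
    "id": "val-set-1",
    "name": "compare-quality",
    "isPublic": False,
    "ownerId": "owner-1",
    "ownerMail": "owner@example.com",
    "createdAt": "2025-01-01T00:00:00Z",
})
print(validation_set.is_public)  # False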
rapidata/api_client_README.md
CHANGED

@@ -79,6 +79,7 @@ Class | Method | HTTP request | Description
 *BenchmarkApi* | [**benchmark_benchmark_id_participants_get**](rapidata/api_client/docs/BenchmarkApi.md#benchmark_benchmark_id_participants_get) | **GET** /benchmark/{benchmarkId}/participants | Query all participants within a benchmark
 *BenchmarkApi* | [**benchmark_benchmark_id_participants_participant_id_submit_post**](rapidata/api_client/docs/BenchmarkApi.md#benchmark_benchmark_id_participants_participant_id_submit_post) | **POST** /benchmark/{benchmarkId}/participants/{participantId}/submit | Submits a participant to a benchmark.
 *BenchmarkApi* | [**benchmark_benchmark_id_participants_post**](rapidata/api_client/docs/BenchmarkApi.md#benchmark_benchmark_id_participants_post) | **POST** /benchmark/{benchmarkId}/participants | Creates a participant in a benchmark.
+*BenchmarkApi* | [**benchmark_benchmark_id_patch**](rapidata/api_client/docs/BenchmarkApi.md#benchmark_benchmark_id_patch) | **PATCH** /benchmark/{benchmarkId} | Updates a benchmark using patch semantics.
 *BenchmarkApi* | [**benchmark_benchmark_id_prompt_post**](rapidata/api_client/docs/BenchmarkApi.md#benchmark_benchmark_id_prompt_post) | **POST** /benchmark/{benchmarkId}/prompt | Adds a new prompt to a benchmark.
 *BenchmarkApi* | [**benchmark_benchmark_id_prompts_get**](rapidata/api_client/docs/BenchmarkApi.md#benchmark_benchmark_id_prompts_get) | **GET** /benchmark/{benchmarkId}/prompts | Returns the paged prompts of a benchmark by its ID.
 *BenchmarkApi* | [**benchmark_benchmark_id_samples_identifier_get**](rapidata/api_client/docs/BenchmarkApi.md#benchmark_benchmark_id_samples_identifier_get) | **GET** /benchmark/{benchmarkId}/samples/{identifier} | Returns the paged prompts of a benchmark by its ID.
@@ -170,6 +171,7 @@ Class | Method | HTTP request | Description
 *ParticipantApi* | [**participant_participant_id_disable_post**](rapidata/api_client/docs/ParticipantApi.md#participant_participant_id_disable_post) | **POST** /participant/{participantId}/disable | This endpoint disables a participant in a benchmark. this means that the participant will no longer actively be matched up against other participants and not collect further results. It will still be visible in the leaderboard.
 *ParticipantApi* | [**participant_participant_id_get**](rapidata/api_client/docs/ParticipantApi.md#participant_participant_id_get) | **GET** /participant/{participantId} | Gets a participant by it's Id.
 *ParticipantApi* | [**participant_participant_id_name_put**](rapidata/api_client/docs/ParticipantApi.md#participant_participant_id_name_put) | **PUT** /participant/{participantId}/name | Updates the name of a participant
+*ParticipantApi* | [**participant_participant_id_patch**](rapidata/api_client/docs/ParticipantApi.md#participant_participant_id_patch) | **PATCH** /participant/{participantId} | Updates a participant using patch semantics.
 *ParticipantApi* | [**participant_participant_id_sample_post**](rapidata/api_client/docs/ParticipantApi.md#participant_participant_id_sample_post) | **POST** /participant/{participantId}/sample | Adds a sample to a participant.
 *ParticipantApi* | [**participant_participant_id_samples_get**](rapidata/api_client/docs/ParticipantApi.md#participant_participant_id_samples_get) | **GET** /participant/{participantId}/samples | Queries all samples of a participant.
 *ParticipantApi* | [**participant_sample_sample_id_delete**](rapidata/api_client/docs/ParticipantApi.md#participant_sample_sample_id_delete) | **DELETE** /participant-sample/{sampleId} | Deletes a sample.
@@ -193,13 +195,12 @@ Class | Method | HTTP request | Description
 *ValidationSetApi* | [**validation_set_recommended_get**](rapidata/api_client/docs/ValidationSetApi.md#validation_set_recommended_get) | **GET** /validation-set/recommended | Gets a validation set that is available to the user and best matches the provided parameters.
 *ValidationSetApi* | [**validation_set_validation_set_id_delete**](rapidata/api_client/docs/ValidationSetApi.md#validation_set_validation_set_id_delete) | **DELETE** /validation-set/{validationSetId} | Gets a validation set by the id.
 *ValidationSetApi* | [**validation_set_validation_set_id_dimensions_patch**](rapidata/api_client/docs/ValidationSetApi.md#validation_set_validation_set_id_dimensions_patch) | **PATCH** /validation-set/{validationSetId}/dimensions | Updates the dimensions of all rapids within a validation set.
-*ValidationSetApi* | [**validation_set_validation_set_id_dimensions_put**](rapidata/api_client/docs/ValidationSetApi.md#validation_set_validation_set_id_dimensions_put) | **PUT** /validation-set/{validationSetId}/dimensions | Updates the dimensions of all rapids within a validation set.
 *ValidationSetApi* | [**validation_set_validation_set_id_export_get**](rapidata/api_client/docs/ValidationSetApi.md#validation_set_validation_set_id_export_get) | **GET** /validation-set/{validationSetId}/export | Exports all rapids of a validation-set to a file.
 *ValidationSetApi* | [**validation_set_validation_set_id_get**](rapidata/api_client/docs/ValidationSetApi.md#validation_set_validation_set_id_get) | **GET** /validation-set/{validationSetId} | Gets a validation set by the id.
+*ValidationSetApi* | [**validation_set_validation_set_id_patch**](rapidata/api_client/docs/ValidationSetApi.md#validation_set_validation_set_id_patch) | **PATCH** /validation-set/{validationSetId} | Updates different characteristics of a validation set.
 *ValidationSetApi* | [**validation_set_validation_set_id_rapid_post**](rapidata/api_client/docs/ValidationSetApi.md#validation_set_validation_set_id_rapid_post) | **POST** /validation-set/{validationSetId}/rapid | Adds a new validation rapid to the specified validation set using files to create the assets.
 *ValidationSetApi* | [**validation_set_validation_set_id_rapids_get**](rapidata/api_client/docs/ValidationSetApi.md#validation_set_validation_set_id_rapids_get) | **GET** /validation-set/{validationSetId}/rapids | Queries the validation rapids for a specific validation set.
 *ValidationSetApi* | [**validation_set_validation_set_id_shouldalert_patch**](rapidata/api_client/docs/ValidationSetApi.md#validation_set_validation_set_id_shouldalert_patch) | **PATCH** /validation-set/{validationSetId}/shouldalert | Updates the dimensions of all rapids within a validation set.
-*ValidationSetApi* | [**validation_set_validation_set_id_shouldalert_put**](rapidata/api_client/docs/ValidationSetApi.md#validation_set_validation_set_id_shouldalert_put) | **PUT** /validation-set/{validationSetId}/shouldalert | Updates the of all rapshouldAlert property of all rapids within a validation set.
 *ValidationSetApi* | [**validation_set_validation_set_id_update_labeling_hints_post**](rapidata/api_client/docs/ValidationSetApi.md#validation_set_validation_set_id_update_labeling_hints_post) | **POST** /validation-set/{validationSetId}/update-labeling-hints | Refreshes the labeling hints for a validation set.
 *ValidationSetApi* | [**validation_set_validation_set_id_visibility_patch**](rapidata/api_client/docs/ValidationSetApi.md#validation_set_validation_set_id_visibility_patch) | **PATCH** /validation-set/{validationSetId}/visibility | Updates the visibility of a validation set.
 *ValidationSetApi* | [**validation_set_zip_compare_post**](rapidata/api_client/docs/ValidationSetApi.md#validation_set_zip_compare_post) | **POST** /validation-set/zip/compare | Imports a compare validation set from a zip file.
@@ -257,7 +258,6 @@ Class | Method | HTTP request | Description
 - [CappedSelection](rapidata/api_client/docs/CappedSelection.md)
 - [ChangeBoostModel](rapidata/api_client/docs/ChangeBoostModel.md)
 - [ClassificationMetadata](rapidata/api_client/docs/ClassificationMetadata.md)
-- [ClassificationMetadataFilterConfig](rapidata/api_client/docs/ClassificationMetadataFilterConfig.md)
 - [ClassificationMetadataModel](rapidata/api_client/docs/ClassificationMetadataModel.md)
 - [ClassifyPayload](rapidata/api_client/docs/ClassifyPayload.md)
 - [ClientModel](rapidata/api_client/docs/ClientModel.md)
@@ -282,8 +282,8 @@ Class | Method | HTTP request | Description
 - [CompareWorkflowModelPairMakerConfig](rapidata/api_client/docs/CompareWorkflowModelPairMakerConfig.md)
 - [ComparisonOperator](rapidata/api_client/docs/ComparisonOperator.md)
 - [ConditionalValidationSelection](rapidata/api_client/docs/ConditionalValidationSelection.md)
+- [ConfidenceInterval](rapidata/api_client/docs/ConfidenceInterval.md)
 - [Coordinate](rapidata/api_client/docs/Coordinate.md)
-- [CountClassificationMetadataFilterConfig](rapidata/api_client/docs/CountClassificationMetadataFilterConfig.md)
 - [CountMetadata](rapidata/api_client/docs/CountMetadata.md)
 - [CountMetadataModel](rapidata/api_client/docs/CountMetadataModel.md)
 - [CountryFilter](rapidata/api_client/docs/CountryFilter.md)
@@ -427,7 +427,6 @@ Class | Method | HTTP request | Description
 - [LocateRapidBlueprint](rapidata/api_client/docs/LocateRapidBlueprint.md)
 - [LocateResult](rapidata/api_client/docs/LocateResult.md)
 - [LocationMetadata](rapidata/api_client/docs/LocationMetadata.md)
-- [LocationMetadataExistsFilterConfig](rapidata/api_client/docs/LocationMetadataExistsFilterConfig.md)
 - [LocationMetadataModel](rapidata/api_client/docs/LocationMetadataModel.md)
 - [LogicOperator](rapidata/api_client/docs/LogicOperator.md)
 - [MultiAsset](rapidata/api_client/docs/MultiAsset.md)
@@ -577,6 +576,7 @@ Class | Method | HTTP request | Description
 - [TranslatedPromptMetadataModel](rapidata/api_client/docs/TranslatedPromptMetadataModel.md)
 - [TranslatedString](rapidata/api_client/docs/TranslatedString.md)
 - [UnlockOrderResult](rapidata/api_client/docs/UnlockOrderResult.md)
+- [UpdateBenchmarkModel](rapidata/api_client/docs/UpdateBenchmarkModel.md)
 - [UpdateBenchmarkNameModel](rapidata/api_client/docs/UpdateBenchmarkNameModel.md)
 - [UpdateDatasetNameModel](rapidata/api_client/docs/UpdateDatasetNameModel.md)
 - [UpdateDimensionsModel](rapidata/api_client/docs/UpdateDimensionsModel.md)
@@ -584,11 +584,13 @@ Class | Method | HTTP request | Description
 - [UpdateLeaderboardNameModel](rapidata/api_client/docs/UpdateLeaderboardNameModel.md)
 - [UpdateLeaderboardResponseConfigModel](rapidata/api_client/docs/UpdateLeaderboardResponseConfigModel.md)
 - [UpdateOrderNameModel](rapidata/api_client/docs/UpdateOrderNameModel.md)
+- [UpdateParticipantModel](rapidata/api_client/docs/UpdateParticipantModel.md)
 - [UpdateParticipantNameModel](rapidata/api_client/docs/UpdateParticipantNameModel.md)
 - [UpdatePromptTagsModel](rapidata/api_client/docs/UpdatePromptTagsModel.md)
 - [UpdateShouldAlertModel](rapidata/api_client/docs/UpdateShouldAlertModel.md)
 - [UpdateValidationRapidModel](rapidata/api_client/docs/UpdateValidationRapidModel.md)
 - [UpdateValidationRapidModelTruth](rapidata/api_client/docs/UpdateValidationRapidModelTruth.md)
+- [UpdateValidationSetModel](rapidata/api_client/docs/UpdateValidationSetModel.md)
 - [UploadCocoResult](rapidata/api_client/docs/UploadCocoResult.md)
 - [UploadFromS3Result](rapidata/api_client/docs/UploadFromS3Result.md)
 - [UrlAssetInput](rapidata/api_client/docs/UrlAssetInput.md)
@@ -609,8 +611,6 @@ Class | Method | HTTP request | Description
 - [WorkflowConfigArtifactModel](rapidata/api_client/docs/WorkflowConfigArtifactModel.md)
 - [WorkflowConfigArtifactModelWorkflowConfig](rapidata/api_client/docs/WorkflowConfigArtifactModelWorkflowConfig.md)
 - [WorkflowLabelingStepModel](rapidata/api_client/docs/WorkflowLabelingStepModel.md)
-- [WorkflowSplitModel](rapidata/api_client/docs/WorkflowSplitModel.md)
-- [WorkflowSplitModelFilterConfigsInner](rapidata/api_client/docs/WorkflowSplitModelFilterConfigsInner.md)
 - [WorkflowState](rapidata/api_client/docs/WorkflowState.md)
 - [ZipEntryFileWrapper](rapidata/api_client/docs/ZipEntryFileWrapper.md)
 
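The README now lists PATCH endpoints for benchmarks, participants, and validation sets. A hedged sketch of calling the validation-set one through the generated low-level client: the method name comes from the table above, while the ApiClient/Configuration setup and the parameter names are assumptions based on openapi-generator's usual Python output, not something this diff confirms.

from rapidata.api_client import ApiClient, Configuration
from rapidata.api_client.api.validation_set_api import ValidationSetApi
from rapidata.api_client.models.update_validation_set_model import UpdateValidationSetModel

# Host is a placeholder; authentication setup is omitted.
api = ValidationSetApi(ApiClient(Configuration(host="https://example.invalid")))
api.validation_set_validation_set_id_patch(
    validation_set_id="your-validation-set-id",                               # assumed parameter name
    update_validation_set_model=UpdateValidationSetModel(shouldAlert=False),  # assumed parameter name
)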
rapidata/rapidata_client/benchmark/rapidata_benchmark.py
CHANGED

@@ -206,43 +206,54 @@ class RapidataBenchmark:
 
     def add_prompt(
         self,
-        identifier: str,
+        identifier: str | None = None,
         prompt: str | None = None,
-
+        prompt_asset: str | None = None,
         tags: Optional[list[str]] = None,
     ):
        """
        Adds a prompt to the benchmark.
 
        Args:
-            identifier: The identifier of the prompt/asset/tags that will be used to match up the media.
+            identifier: The identifier of the prompt/asset/tags that will be used to match up the media. If not provided, it will use the prompt, asset or prompt + asset as the identifier.
            prompt: The prompt that will be used to evaluate the model.
-
+            prompt_asset: The prompt asset that will be used to evaluate the model. Provided as a link to the asset.
            tags: The tags can be used to filter the leaderboard results. They will NOT be shown to the users.
        """
        with tracer.start_as_current_span("RapidataBenchmark.add_prompt"):
            if tags is None:
                tags = []
 
-            if
-                raise ValueError("
+            if prompt is None and prompt_asset is None:
+                raise ValueError("Prompt or prompt asset must be provided.")
+
+            if identifier is None and prompt is None:
+                raise ValueError("Identifier or prompt must be provided.")
 
-            if
-                raise ValueError("
+            if identifier and not isinstance(identifier, str):
+                raise ValueError("Identifier must be a string.")
 
-            if prompt
+            if prompt and not isinstance(prompt, str):
                raise ValueError("Prompt must be a string.")
 
-            if
+            if prompt_asset and not isinstance(prompt_asset, str):
                raise ValueError(
                    "Asset must be a string. That is the link to the asset."
                )
 
+            if identifier is None:
+                assert prompt is not None
+                if prompt in self.prompts:
+                    raise ValueError(
+                        "Prompts must be unique. Otherwise use identifiers."
+                    )
+                identifier = prompt
+
            if identifier in self.identifiers:
                raise ValueError("Identifier already exists in the benchmark.")
 
-            if
-                raise ValueError("
+            if prompt_asset is not None and not re.match(r"^https?://", prompt_asset):
+                raise ValueError("Prompt asset must be a link to the asset.")
 
            if tags is not None and (
                not isinstance(tags, list)
@@ -251,10 +262,10 @@ class RapidataBenchmark:
                raise ValueError("Tags must be a list of strings.")
 
            logger.info(
-                "Adding identifier %s with prompt %s, asset %s and tags %s to benchmark %s",
+                "Adding identifier %s with prompt %s, prompt asset %s and tags %s to benchmark %s",
                identifier,
                prompt,
-
+                prompt_asset,
                tags,
                self.id,
            )
@@ -263,7 +274,7 @@ class RapidataBenchmark:
 
            self.__tags.append(tags)
            self.__prompts.append(prompt)
-            self.__prompt_assets.append(
+            self.__prompt_assets.append(prompt_asset)
 
            self.__openapi_service.benchmark_api.benchmark_benchmark_id_prompt_post(
                benchmark_id=self.id,
@@ -272,9 +283,9 @@ class RapidataBenchmark:
                prompt=prompt,
                promptAsset=(
                    SubmitPromptModelPromptAsset(
-                        UrlAssetInput(_t="UrlAssetInput", url=
+                        UrlAssetInput(_t="UrlAssetInput", url=prompt_asset)
                    )
-                    if
+                    if prompt_asset is not None
                    else None
                ),
                tags=tags,
@@ -379,28 +390,48 @@ class RapidataBenchmark:
        )
 
     def evaluate_model(
-        self,
+        self,
+        name: str,
+        media: list[str],
+        identifiers: list[str] | None = None,
+        prompts: list[str] | None = None,
    ) -> None:
        """
        Evaluates a model on the benchmark across all leaderboards.
 
+        prompts or identifiers must be provided to match the media.
+
        Args:
            name: The name of the model.
            media: The generated images/videos that will be used to evaluate the model.
-            identifiers: The identifiers that correspond to the media. The order of the identifiers must match the order of the media
+            identifiers: The identifiers that correspond to the media. The order of the identifiers must match the order of the media.\n
                The identifiers that are used must be registered for the benchmark. To see the registered identifiers, use the identifiers property.
+            prompts: The prompts that correspond to the media. The order of the prompts must match the order of the media.
        """
        with tracer.start_as_current_span("evaluate_model"):
            if not media:
                raise ValueError("Media must be a non-empty list of strings")
 
+            if not identifiers and not prompts:
+                raise ValueError("Identifiers or prompts must be provided.")
+
+            if identifiers and prompts:
+                raise ValueError(
+                    "Identifiers and prompts cannot be provided at the same time. Use one or the other."
+                )
+
+            if not identifiers:
+                assert prompts is not None
+                identifiers = prompts
+
            if len(media) != len(identifiers):
-                raise ValueError(
+                raise ValueError(
+                    "Media and identifiers/prompts must have the same length"
+                )
 
            if not all(identifier in self.identifiers for identifier in identifiers):
                raise ValueError(
-                    "All identifiers must be in the registered identifiers list. To see the registered identifiers, use the identifiers property
-                    \nTo see the prompts that are associated with the identifiers, use the prompts property."
+                    "All identifiers/prompts must be in the registered identifiers/prompts list. To see the registered identifiers/prompts, use the identifiers/prompts property."
                )
 
            # happens before the creation of the participant to ensure all media paths are valid
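With the relaxed signatures above, a benchmark with unique prompts no longer needs explicit identifiers: add_prompt falls back to the prompt itself, and evaluate_model accepts prompts instead of identifiers. A short usage sketch; `benchmark` is assumed to be an existing RapidataBenchmark instance and the media paths are placeholders:

# Register two prompts; each prompt doubles as its identifier.
benchmark.add_prompt(prompt="A red cube balancing on a blue sphere")
benchmark.add_prompt(prompt="A watercolor painting of a lighthouse at dusk")

benchmark.evaluate_model(
    name="my-model-v2",
    media=["outputs/red_cube.png", "outputs/lighthouse.png"],
    prompts=[
        "A red cube balancing on a blue sphere",
        "A watercolor painting of a lighthouse at dusk",
    ],  # matched to media by index; identifiers could be passed instead
)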
rapidata/rapidata_client/benchmark/rapidata_benchmark_manager.py
CHANGED

@@ -1,4 +1,4 @@
-from typing import Optional
+from typing import Optional, cast
 from rapidata.rapidata_client.benchmark.rapidata_benchmark import RapidataBenchmark
 from rapidata.api_client.models.create_benchmark_model import CreateBenchmarkModel
 from rapidata.service.openapi_service import OpenAPIService
@@ -30,7 +30,7 @@ class RapidataBenchmarkManager:
     def create_new_benchmark(
        self,
        name: str,
-        identifiers: list[str],
+        identifiers: Optional[list[str]] = None,
        prompts: Optional[list[str | None]] = None,
        prompt_assets: Optional[list[str | None]] = None,
        tags: Optional[list[list[str] | None]] = None,
@@ -39,11 +39,11 @@ class RapidataBenchmarkManager:
        Creates a new benchmark with the given name, identifiers, prompts, and media assets.
        Everything is matched up by the indexes of the lists.
 
-        prompts or
+        prompts or identifiers must be provided, as well as prompts or prompt_assets.
 
        Args:
            name: The name of the benchmark.
-            identifiers: The identifiers of the prompts/assets/tags that will be used to match up the media
+            identifiers: The identifiers of the prompts/assets/tags that will be used to match up the media. If not provided, it will use the prompts as the identifiers.
            prompts: The prompts that will be registered for the benchmark.
            prompt_assets: The prompt assets that will be registered for the benchmark.
            tags: The tags that will be associated with the prompts to use for filtering the leaderboard results. They will NOT be shown to the users.
@@ -81,17 +81,32 @@ class RapidataBenchmarkManager:
        ):
            raise ValueError("Media assets must be a list of strings or None.")
 
-        if
-            isinstance(
+        if identifiers and (
+            not isinstance(identifiers, list)
+            or not all(isinstance(identifier, str) for identifier in identifiers)
        ):
            raise ValueError("Identifiers must be a list of strings.")
 
-        if
-
+        if identifiers:
+            if not len(set(identifiers)) == len(identifiers):
+                raise ValueError("Identifiers must be unique.")
+
+        if tags is not None:
+            if not isinstance(tags, list):
+                raise ValueError("Tags must be a list of lists of strings or None.")
+
+            for tag in tags:
+                if tag is not None and (
+                    not isinstance(tag, list)
+                    or not all(isinstance(item, str) for item in tag)
+                ):
+                    raise ValueError(
+                        "Tags must be a list of lists of strings or None."
+                    )
 
-        if
+        if not identifiers and not prompts:
            raise ValueError(
-                "
+                "At least one of identifiers or prompts must be provided."
            )
 
        if not prompts and not prompt_assets:
@@ -99,11 +114,41 @@ class RapidataBenchmarkManager:
                "At least one of prompts or media assets must be provided."
            )
 
-        if
-
+        if not identifiers:
+            assert prompts is not None
+            if not len(set(prompts)) == len(prompts):
+                raise ValueError(
+                    "Prompts must be unique. Otherwise use identifiers."
+                )
+            if any(prompt is None for prompt in prompts):
+                raise ValueError(
+                    "Prompts must not be None. Otherwise use identifiers."
+                )
+
+            identifiers = cast(list[str], prompts)
+
+        assert identifiers is not None
+
+        expected_length = len(identifiers)
+
+        if not prompts:
+            prompts = cast(list[str | None], [None] * expected_length)
+
+        if not prompt_assets:
+            prompt_assets = cast(list[str | None], [None] * expected_length)
 
-        if
-
+        if not tags:
+            tags = cast(list[list[str] | None], [None] * expected_length)
+
+        # At this point, all variables are guaranteed to be lists, not None
+        assert prompts is not None
+        assert prompt_assets is not None
+        assert tags is not None
+
+        if not (expected_length == len(prompts) == len(prompt_assets) == len(tags)):
+            raise ValueError(
+                "Identifiers, prompts, media assets, and tags must have the same length or set to None."
+            )
 
        logger.info("Creating new benchmark %s", name)
 
@@ -119,16 +164,8 @@ class RapidataBenchmarkManager:
            name, benchmark_result.id, self.__openapi_service
        )
 
-        prompts_list = prompts if prompts is not None else [None] * len(identifiers)
-        media_assets_list = (
-            prompt_assets
-            if prompt_assets is not None
-            else [None] * len(identifiers)
-        )
-        tags_list = tags if tags is not None else [None] * len(identifiers)
-
        for identifier, prompt, asset, tag in zip(
-            identifiers,
+            identifiers, prompts, prompt_assets, tags
        ):
            benchmark.add_prompt(identifier, prompt, asset, tag)
 
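create_new_benchmark mirrors that change: identifiers is now optional and falls back to the (unique, non-None) prompts, with prompt assets and tags padded to the same length. A hedged sketch; `benchmark_manager` is assumed to be a RapidataBenchmarkManager instance obtained from the Rapidata client:

benchmark_manager.create_new_benchmark(
    name="t2i-eval",
    prompts=[
        "A red cube balancing on a blue sphere",
        "A watercolor painting of a lighthouse at dusk",
    ],
    # identifiers default to the prompts; prompt_assets and tags default to None per entry
)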
rapidata/rapidata_client/config/logging_config.py
CHANGED

@@ -23,7 +23,7 @@ class LoggingConfig(BaseModel):
    """
    Holds the configuration for the logging process.
 
-
+    Attributes:
        level (str): The logging level. Defaults to "WARNING".
        log_file (str | None): The logging file. Defaults to None.
        format (str): The logging format. Defaults to "%(asctime)s - %(name)s - %(levelname)s - %(message)s".

rapidata/rapidata_client/config/order_config.py
CHANGED

@@ -5,7 +5,7 @@ class OrderConfig(BaseModel):
    """
    Holds the configuration for the order process.
 
-
+    Attributes:
        minOrderDatapointsForValidation (int): The minimum number of datapoints required so that an automatic validationset gets created if no recommended was found. Defaults to 50.
        autoValidationSetSize (int): The maximum size of the auto-generated validation set. Defaults to 20.
    """

rapidata/rapidata_client/config/rapidata_config.py
CHANGED

@@ -11,7 +11,7 @@ class RapidataConfig(BaseModel):
 
    To adjust the configurations used, you can modify the `rapidata_config` object.
 
-
+    Attributes:
        enableBetaFeatures (bool): Whether to enable beta features. Defaults to False.
        upload (UploadConfig): The configuration for the upload process.
            Such as the maximum number of worker threads for processing media paths and the maximum number of retries for failed uploads.

rapidata/rapidata_client/config/upload_config.py
CHANGED

@@ -5,7 +5,7 @@ class UploadConfig(BaseModel):
    """
    Holds the configuration for the upload process.
 
-
+    Attributes:
        maxWorkers (int): The maximum number of worker threads for processing media paths. Defaults to 10.
        maxRetries (int): The maximum number of retries for failed uploads. Defaults to 3.
    """
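The config docstrings now document their fields under an Attributes heading. A hedged sketch of adjusting them at runtime; importing a module-level `rapidata_config` object from this path is an assumption based on the RapidataConfig docstring ("you can modify the `rapidata_config` object"), not something this diff confirms:

from rapidata.rapidata_client.config.rapidata_config import rapidata_config  # assumed import path

rapidata_config.enableBetaFeatures = True   # default False
rapidata_config.upload.maxWorkers = 20      # default 10
rapidata_config.upload.maxRetries = 5       # default 3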