rapidata 2.27.3__py3-none-any.whl → 2.27.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of rapidata might be problematic.
Files changed (28)
  1. rapidata/__init__.py +1 -1
  2. rapidata/api_client/__init__.py +10 -2
  3. rapidata/api_client/api/campaign_api.py +247 -0
  4. rapidata/api_client/api/datapoint_api.py +0 -267
  5. rapidata/api_client/api/dataset_api.py +24 -6
  6. rapidata/api_client/api/validation_set_api.py +431 -2
  7. rapidata/api_client/models/__init__.py +10 -2
  8. rapidata/api_client/models/ab_test_selection_a_inner.py +26 -12
  9. rapidata/api_client/models/boost_query_result.py +100 -0
  10. rapidata/api_client/models/boost_status.py +39 -0
  11. rapidata/api_client/models/compare_workflow_config_metadata_value.py +38 -10
  12. rapidata/api_client/models/datapoint_model_paged_result.py +105 -0
  13. rapidata/api_client/models/effort_capped_selection.py +115 -0
  14. rapidata/api_client/models/file_asset_model_metadata_value.py +39 -11
  15. rapidata/api_client/models/get_validation_rapids_result.py +3 -3
  16. rapidata/api_client/models/get_workflow_results_result.py +3 -3
  17. rapidata/api_client/models/rapid_model.py +3 -3
  18. rapidata/api_client/models/streams_metadata.py +102 -0
  19. rapidata/api_client/models/streams_metadata_model.py +100 -0
  20. rapidata/api_client/models/validation_set_validation_set_id_rapid_post_payload_parameter.py +252 -0
  21. rapidata/api_client/models/validation_set_validation_set_id_rapid_post_truth_parameter.py +280 -0
  22. rapidata/api_client/models/video_duration_metadata.py +98 -0
  23. rapidata/api_client/models/video_duration_metadata_model.py +96 -0
  24. rapidata/api_client_README.md +17 -7
  25. {rapidata-2.27.3.dist-info → rapidata-2.27.4.dist-info}/METADATA +2 -2
  26. {rapidata-2.27.3.dist-info → rapidata-2.27.4.dist-info}/RECORD +28 -18
  27. {rapidata-2.27.3.dist-info → rapidata-2.27.4.dist-info}/LICENSE +0 -0
  28. {rapidata-2.27.3.dist-info → rapidata-2.27.4.dist-info}/WHEEL +0 -0
rapidata/api_client/models/ab_test_selection_a_inner.py
@@ -19,6 +19,7 @@ from pydantic import BaseModel, ConfigDict, Field, StrictStr, ValidationError, f
  from typing import Any, List, Optional
  from rapidata.api_client.models.conditional_validation_selection import ConditionalValidationSelection
  from rapidata.api_client.models.demographic_selection import DemographicSelection
+ from rapidata.api_client.models.effort_capped_selection import EffortCappedSelection
  from rapidata.api_client.models.labeling_selection import LabelingSelection
  from rapidata.api_client.models.static_selection import StaticSelection
  from rapidata.api_client.models.validation_selection import ValidationSelection
@@ -26,7 +27,7 @@ from pydantic import StrictStr, Field
  from typing import Union, List, Set, Optional, Dict
  from typing_extensions import Literal, Self

- ABTESTSELECTIONAINNER_ONE_OF_SCHEMAS = ["AbTestSelection", "CappedSelection", "ConditionalValidationSelection", "DemographicSelection", "LabelingSelection", "ShufflingSelection", "StaticSelection", "ValidationSelection"]
+ ABTESTSELECTIONAINNER_ONE_OF_SCHEMAS = ["AbTestSelection", "CappedSelection", "ConditionalValidationSelection", "DemographicSelection", "EffortCappedSelection", "LabelingSelection", "ShufflingSelection", "StaticSelection", "ValidationSelection"]

  class AbTestSelectionAInner(BaseModel):
  """
@@ -40,16 +41,18 @@ class AbTestSelectionAInner(BaseModel):
  oneof_schema_3_validator: Optional[ConditionalValidationSelection] = None
  # data type: DemographicSelection
  oneof_schema_4_validator: Optional[DemographicSelection] = None
+ # data type: EffortCappedSelection
+ oneof_schema_5_validator: Optional[EffortCappedSelection] = None
  # data type: LabelingSelection
- oneof_schema_5_validator: Optional[LabelingSelection] = None
+ oneof_schema_6_validator: Optional[LabelingSelection] = None
  # data type: ShufflingSelection
- oneof_schema_6_validator: Optional[ShufflingSelection] = None
+ oneof_schema_7_validator: Optional[ShufflingSelection] = None
  # data type: StaticSelection
- oneof_schema_7_validator: Optional[StaticSelection] = None
+ oneof_schema_8_validator: Optional[StaticSelection] = None
  # data type: ValidationSelection
- oneof_schema_8_validator: Optional[ValidationSelection] = None
- actual_instance: Optional[Union[AbTestSelection, CappedSelection, ConditionalValidationSelection, DemographicSelection, LabelingSelection, ShufflingSelection, StaticSelection, ValidationSelection]] = None
- one_of_schemas: Set[str] = { "AbTestSelection", "CappedSelection", "ConditionalValidationSelection", "DemographicSelection", "LabelingSelection", "ShufflingSelection", "StaticSelection", "ValidationSelection" }
+ oneof_schema_9_validator: Optional[ValidationSelection] = None
+ actual_instance: Optional[Union[AbTestSelection, CappedSelection, ConditionalValidationSelection, DemographicSelection, EffortCappedSelection, LabelingSelection, ShufflingSelection, StaticSelection, ValidationSelection]] = None
+ one_of_schemas: Set[str] = { "AbTestSelection", "CappedSelection", "ConditionalValidationSelection", "DemographicSelection", "EffortCappedSelection", "LabelingSelection", "ShufflingSelection", "StaticSelection", "ValidationSelection" }

  model_config = ConfigDict(
  validate_assignment=True,
@@ -95,6 +98,11 @@ class AbTestSelectionAInner(BaseModel):
  error_messages.append(f"Error! Input type `{type(v)}` is not `DemographicSelection`")
  else:
  match += 1
+ # validate data type: EffortCappedSelection
+ if not isinstance(v, EffortCappedSelection):
+ error_messages.append(f"Error! Input type `{type(v)}` is not `EffortCappedSelection`")
+ else:
+ match += 1
  # validate data type: LabelingSelection
  if not isinstance(v, LabelingSelection):
  error_messages.append(f"Error! Input type `{type(v)}` is not `LabelingSelection`")
@@ -117,10 +125,10 @@ class AbTestSelectionAInner(BaseModel):
  match += 1
  if match > 1:
  # more than 1 match
- raise ValueError("Multiple matches found when setting `actual_instance` in AbTestSelectionAInner with oneOf schemas: AbTestSelection, CappedSelection, ConditionalValidationSelection, DemographicSelection, LabelingSelection, ShufflingSelection, StaticSelection, ValidationSelection. Details: " + ", ".join(error_messages))
+ raise ValueError("Multiple matches found when setting `actual_instance` in AbTestSelectionAInner with oneOf schemas: AbTestSelection, CappedSelection, ConditionalValidationSelection, DemographicSelection, EffortCappedSelection, LabelingSelection, ShufflingSelection, StaticSelection, ValidationSelection. Details: " + ", ".join(error_messages))
  elif match == 0:
  # no match
- raise ValueError("No match found when setting `actual_instance` in AbTestSelectionAInner with oneOf schemas: AbTestSelection, CappedSelection, ConditionalValidationSelection, DemographicSelection, LabelingSelection, ShufflingSelection, StaticSelection, ValidationSelection. Details: " + ", ".join(error_messages))
+ raise ValueError("No match found when setting `actual_instance` in AbTestSelectionAInner with oneOf schemas: AbTestSelection, CappedSelection, ConditionalValidationSelection, DemographicSelection, EffortCappedSelection, LabelingSelection, ShufflingSelection, StaticSelection, ValidationSelection. Details: " + ", ".join(error_messages))
  else:
  return v

@@ -159,6 +167,12 @@ class AbTestSelectionAInner(BaseModel):
  match += 1
  except (ValidationError, ValueError) as e:
  error_messages.append(str(e))
+ # deserialize data into EffortCappedSelection
+ try:
+ instance.actual_instance = EffortCappedSelection.from_json(json_str)
+ match += 1
+ except (ValidationError, ValueError) as e:
+ error_messages.append(str(e))
  # deserialize data into LabelingSelection
  try:
  instance.actual_instance = LabelingSelection.from_json(json_str)
@@ -186,10 +200,10 @@ class AbTestSelectionAInner(BaseModel):

  if match > 1:
  # more than 1 match
- raise ValueError("Multiple matches found when deserializing the JSON string into AbTestSelectionAInner with oneOf schemas: AbTestSelection, CappedSelection, ConditionalValidationSelection, DemographicSelection, LabelingSelection, ShufflingSelection, StaticSelection, ValidationSelection. Details: " + ", ".join(error_messages))
+ raise ValueError("Multiple matches found when deserializing the JSON string into AbTestSelectionAInner with oneOf schemas: AbTestSelection, CappedSelection, ConditionalValidationSelection, DemographicSelection, EffortCappedSelection, LabelingSelection, ShufflingSelection, StaticSelection, ValidationSelection. Details: " + ", ".join(error_messages))
  elif match == 0:
  # no match
- raise ValueError("No match found when deserializing the JSON string into AbTestSelectionAInner with oneOf schemas: AbTestSelection, CappedSelection, ConditionalValidationSelection, DemographicSelection, LabelingSelection, ShufflingSelection, StaticSelection, ValidationSelection. Details: " + ", ".join(error_messages))
+ raise ValueError("No match found when deserializing the JSON string into AbTestSelectionAInner with oneOf schemas: AbTestSelection, CappedSelection, ConditionalValidationSelection, DemographicSelection, EffortCappedSelection, LabelingSelection, ShufflingSelection, StaticSelection, ValidationSelection. Details: " + ", ".join(error_messages))
  else:
  return instance

@@ -203,7 +217,7 @@ class AbTestSelectionAInner(BaseModel):
  else:
  return json.dumps(self.actual_instance)

- def to_dict(self) -> Optional[Union[Dict[str, Any], AbTestSelection, CappedSelection, ConditionalValidationSelection, DemographicSelection, LabelingSelection, ShufflingSelection, StaticSelection, ValidationSelection]]:
+ def to_dict(self) -> Optional[Union[Dict[str, Any], AbTestSelection, CappedSelection, ConditionalValidationSelection, DemographicSelection, EffortCappedSelection, LabelingSelection, ShufflingSelection, StaticSelection, ValidationSelection]]:
  """Returns the dict representation of the actual instance"""
  if self.actual_instance is None:
  return None
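The net effect of this change is that EffortCappedSelection becomes a valid variant of the selection oneOf wrapper. A minimal sketch of how the wrapper might be used with the new variant (assuming the EffortCappedSelection model shown later in this diff; field values are illustrative, not taken from the Rapidata API):

    from rapidata.api_client.models.ab_test_selection_a_inner import AbTestSelectionAInner
    from rapidata.api_client.models.effort_capped_selection import EffortCappedSelection

    # Build the new variant directly; the effort budget value is made up for illustration.
    selection = EffortCappedSelection(t="EffortCappedSelection", effort_budget=100)

    # Wrap it in the oneOf container; the actual_instance validator now accepts it.
    wrapper = AbTestSelectionAInner(actual_instance=selection)
    print(wrapper.to_json())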
rapidata/api_client/models/boost_query_result.py (new file)
@@ -0,0 +1,100 @@
+ # coding: utf-8
+
+ """
+ Rapidata.Dataset
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+ The version of the OpenAPI document: v1
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+ """ # noqa: E501
+
+
+ from __future__ import annotations
+ import pprint
+ import re # noqa: F401
+ import json
+
+ from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr, field_validator
+ from typing import Any, ClassVar, Dict, List
+ from typing import Optional, Set
+ from typing_extensions import Self
+
+ class BoostQueryResult(BaseModel):
+ """
+ BoostQueryResult
+ """ # noqa: E501
+ status: StrictStr
+ active_campaigns: List[StrictStr] = Field(alias="activeCampaigns")
+ inactive_campaigns: List[StrictStr] = Field(alias="inactiveCampaigns")
+ unknown_campaigns: List[StrictInt] = Field(alias="unknownCampaigns")
+ __properties: ClassVar[List[str]] = ["status", "activeCampaigns", "inactiveCampaigns", "unknownCampaigns"]
+
+ @field_validator('status')
+ def status_validate_enum(cls, value):
+ """Validates the enum"""
+ if value not in set(['Active', 'Inactive', 'Partial', 'Unknown']):
+ raise ValueError("must be one of enum values ('Active', 'Inactive', 'Partial', 'Unknown')")
+ return value
+
+ model_config = ConfigDict(
+ populate_by_name=True,
+ validate_assignment=True,
+ protected_namespaces=(),
+ )
+
+
+ def to_str(self) -> str:
+ """Returns the string representation of the model using alias"""
+ return pprint.pformat(self.model_dump(by_alias=True))
+
+ def to_json(self) -> str:
+ """Returns the JSON representation of the model using alias"""
+ # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+ return json.dumps(self.to_dict())
+
+ @classmethod
+ def from_json(cls, json_str: str) -> Optional[Self]:
+ """Create an instance of BoostQueryResult from a JSON string"""
+ return cls.from_dict(json.loads(json_str))
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Return the dictionary representation of the model using alias.
+
+ This has the following differences from calling pydantic's
+ `self.model_dump(by_alias=True)`:
+
+ * `None` is only added to the output dict for nullable fields that
+ were set at model initialization. Other fields with value `None`
+ are ignored.
+ """
+ excluded_fields: Set[str] = set([
+ ])
+
+ _dict = self.model_dump(
+ by_alias=True,
+ exclude=excluded_fields,
+ exclude_none=True,
+ )
+ return _dict
+
+ @classmethod
+ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+ """Create an instance of BoostQueryResult from a dict"""
+ if obj is None:
+ return None
+
+ if not isinstance(obj, dict):
+ return cls.model_validate(obj)
+
+ _obj = cls.model_validate({
+ "status": obj.get("status"),
+ "activeCampaigns": obj.get("activeCampaigns"),
+ "inactiveCampaigns": obj.get("inactiveCampaigns"),
+ "unknownCampaigns": obj.get("unknownCampaigns")
+ })
+ return _obj
+
+
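As a quick orientation to the generated helpers, a BoostQueryResult could be rebuilt from an API-style payload and serialized back roughly like this (payload values are illustrative, not taken from the Rapidata API):

    from rapidata.api_client.models.boost_query_result import BoostQueryResult

    # Keys follow the __properties aliases above; note that unknownCampaigns is
    # typed List[StrictInt] in this generated model, so an empty list is used here.
    payload = {
        "status": "Partial",
        "activeCampaigns": ["campaign-a"],
        "inactiveCampaigns": ["campaign-b"],
        "unknownCampaigns": [],
    }
    result = BoostQueryResult.from_dict(payload)
    assert result.status == "Partial"
    print(result.to_json())  # round-trips through to_dict(), dropping unset None fields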
rapidata/api_client/models/boost_status.py (new file)
@@ -0,0 +1,39 @@
+ # coding: utf-8
+
+ """
+ Rapidata.Dataset
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+ The version of the OpenAPI document: v1
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+ """ # noqa: E501
+
+
+ from __future__ import annotations
+ import json
+ from enum import Enum
+ from typing_extensions import Self
+
+
+ class BoostStatus(str, Enum):
+ """
+ BoostStatus
+ """
+
+ """
+ allowed enum values
+ """
+ ACTIVE = 'Active'
+ INACTIVE = 'Inactive'
+ PARTIAL = 'Partial'
+ UNKNOWN = 'Unknown'
+
+ @classmethod
+ def from_json(cls, json_str: str) -> Self:
+ """Create an instance of BoostStatus from a JSON string"""
+ return cls(json.loads(json_str))
+
+
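BoostStatus mirrors the status strings validated in BoostQueryResult above. A small usage sketch:

    from rapidata.api_client.models.boost_status import BoostStatus

    # from_json expects a JSON-encoded string, e.g. '"Active"'.
    status = BoostStatus.from_json('"Active"')
    assert status is BoostStatus.ACTIVE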
rapidata/api_client/models/compare_workflow_config_metadata_value.py
@@ -24,13 +24,15 @@ from rapidata.api_client.models.location_metadata import LocationMetadata
  from rapidata.api_client.models.original_filename_metadata import OriginalFilenameMetadata
  from rapidata.api_client.models.prompt_metadata import PromptMetadata
  from rapidata.api_client.models.source_url_metadata import SourceUrlMetadata
+ from rapidata.api_client.models.streams_metadata import StreamsMetadata
  from rapidata.api_client.models.text_metadata import TextMetadata
  from rapidata.api_client.models.transcription_metadata import TranscriptionMetadata
+ from rapidata.api_client.models.video_duration_metadata import VideoDurationMetadata
  from pydantic import StrictStr, Field
  from typing import Union, List, Set, Optional, Dict
  from typing_extensions import Literal, Self

- COMPAREWORKFLOWCONFIGMETADATAVALUE_ONE_OF_SCHEMAS = ["AssetMetadata", "ClassificationMetadata", "CountMetadata", "ImageDimensionMetadata", "LocationMetadata", "OriginalFilenameMetadata", "PromptMetadata", "SourceUrlMetadata", "TextMetadata", "TranscriptionMetadata"]
+ COMPAREWORKFLOWCONFIGMETADATAVALUE_ONE_OF_SCHEMAS = ["AssetMetadata", "ClassificationMetadata", "CountMetadata", "ImageDimensionMetadata", "LocationMetadata", "OriginalFilenameMetadata", "PromptMetadata", "SourceUrlMetadata", "StreamsMetadata", "TextMetadata", "TranscriptionMetadata", "VideoDurationMetadata"]

  class CompareWorkflowConfigMetadataValue(BaseModel):
  """
@@ -52,12 +54,16 @@ class CompareWorkflowConfigMetadataValue(BaseModel):
  oneof_schema_7_validator: Optional[PromptMetadata] = None
  # data type: SourceUrlMetadata
  oneof_schema_8_validator: Optional[SourceUrlMetadata] = None
+ # data type: StreamsMetadata
+ oneof_schema_9_validator: Optional[StreamsMetadata] = None
  # data type: TextMetadata
- oneof_schema_9_validator: Optional[TextMetadata] = None
+ oneof_schema_10_validator: Optional[TextMetadata] = None
  # data type: TranscriptionMetadata
- oneof_schema_10_validator: Optional[TranscriptionMetadata] = None
- actual_instance: Optional[Union[AssetMetadata, ClassificationMetadata, CountMetadata, ImageDimensionMetadata, LocationMetadata, OriginalFilenameMetadata, PromptMetadata, SourceUrlMetadata, TextMetadata, TranscriptionMetadata]] = None
- one_of_schemas: Set[str] = { "AssetMetadata", "ClassificationMetadata", "CountMetadata", "ImageDimensionMetadata", "LocationMetadata", "OriginalFilenameMetadata", "PromptMetadata", "SourceUrlMetadata", "TextMetadata", "TranscriptionMetadata" }
+ oneof_schema_11_validator: Optional[TranscriptionMetadata] = None
+ # data type: VideoDurationMetadata
+ oneof_schema_12_validator: Optional[VideoDurationMetadata] = None
+ actual_instance: Optional[Union[AssetMetadata, ClassificationMetadata, CountMetadata, ImageDimensionMetadata, LocationMetadata, OriginalFilenameMetadata, PromptMetadata, SourceUrlMetadata, StreamsMetadata, TextMetadata, TranscriptionMetadata, VideoDurationMetadata]] = None
+ one_of_schemas: Set[str] = { "AssetMetadata", "ClassificationMetadata", "CountMetadata", "ImageDimensionMetadata", "LocationMetadata", "OriginalFilenameMetadata", "PromptMetadata", "SourceUrlMetadata", "StreamsMetadata", "TextMetadata", "TranscriptionMetadata", "VideoDurationMetadata" }

  model_config = ConfigDict(
  validate_assignment=True,
@@ -123,6 +129,11 @@ class CompareWorkflowConfigMetadataValue(BaseModel):
  error_messages.append(f"Error! Input type `{type(v)}` is not `SourceUrlMetadata`")
  else:
  match += 1
+ # validate data type: StreamsMetadata
+ if not isinstance(v, StreamsMetadata):
+ error_messages.append(f"Error! Input type `{type(v)}` is not `StreamsMetadata`")
+ else:
+ match += 1
  # validate data type: TextMetadata
  if not isinstance(v, TextMetadata):
  error_messages.append(f"Error! Input type `{type(v)}` is not `TextMetadata`")
@@ -133,12 +144,17 @@ class CompareWorkflowConfigMetadataValue(BaseModel):
  error_messages.append(f"Error! Input type `{type(v)}` is not `TranscriptionMetadata`")
  else:
  match += 1
+ # validate data type: VideoDurationMetadata
+ if not isinstance(v, VideoDurationMetadata):
+ error_messages.append(f"Error! Input type `{type(v)}` is not `VideoDurationMetadata`")
+ else:
+ match += 1
  if match > 1:
  # more than 1 match
- raise ValueError("Multiple matches found when setting `actual_instance` in CompareWorkflowConfigMetadataValue with oneOf schemas: AssetMetadata, ClassificationMetadata, CountMetadata, ImageDimensionMetadata, LocationMetadata, OriginalFilenameMetadata, PromptMetadata, SourceUrlMetadata, TextMetadata, TranscriptionMetadata. Details: " + ", ".join(error_messages))
+ raise ValueError("Multiple matches found when setting `actual_instance` in CompareWorkflowConfigMetadataValue with oneOf schemas: AssetMetadata, ClassificationMetadata, CountMetadata, ImageDimensionMetadata, LocationMetadata, OriginalFilenameMetadata, PromptMetadata, SourceUrlMetadata, StreamsMetadata, TextMetadata, TranscriptionMetadata, VideoDurationMetadata. Details: " + ", ".join(error_messages))
  elif match == 0:
  # no match
- raise ValueError("No match found when setting `actual_instance` in CompareWorkflowConfigMetadataValue with oneOf schemas: AssetMetadata, ClassificationMetadata, CountMetadata, ImageDimensionMetadata, LocationMetadata, OriginalFilenameMetadata, PromptMetadata, SourceUrlMetadata, TextMetadata, TranscriptionMetadata. Details: " + ", ".join(error_messages))
+ raise ValueError("No match found when setting `actual_instance` in CompareWorkflowConfigMetadataValue with oneOf schemas: AssetMetadata, ClassificationMetadata, CountMetadata, ImageDimensionMetadata, LocationMetadata, OriginalFilenameMetadata, PromptMetadata, SourceUrlMetadata, StreamsMetadata, TextMetadata, TranscriptionMetadata, VideoDurationMetadata. Details: " + ", ".join(error_messages))
  else:
  return v

@@ -201,6 +217,12 @@ class CompareWorkflowConfigMetadataValue(BaseModel):
  match += 1
  except (ValidationError, ValueError) as e:
  error_messages.append(str(e))
+ # deserialize data into StreamsMetadata
+ try:
+ instance.actual_instance = StreamsMetadata.from_json(json_str)
+ match += 1
+ except (ValidationError, ValueError) as e:
+ error_messages.append(str(e))
  # deserialize data into TextMetadata
  try:
  instance.actual_instance = TextMetadata.from_json(json_str)
@@ -213,13 +235,19 @@ class CompareWorkflowConfigMetadataValue(BaseModel):
  match += 1
  except (ValidationError, ValueError) as e:
  error_messages.append(str(e))
+ # deserialize data into VideoDurationMetadata
+ try:
+ instance.actual_instance = VideoDurationMetadata.from_json(json_str)
+ match += 1
+ except (ValidationError, ValueError) as e:
+ error_messages.append(str(e))

  if match > 1:
  # more than 1 match
- raise ValueError("Multiple matches found when deserializing the JSON string into CompareWorkflowConfigMetadataValue with oneOf schemas: AssetMetadata, ClassificationMetadata, CountMetadata, ImageDimensionMetadata, LocationMetadata, OriginalFilenameMetadata, PromptMetadata, SourceUrlMetadata, TextMetadata, TranscriptionMetadata. Details: " + ", ".join(error_messages))
+ raise ValueError("Multiple matches found when deserializing the JSON string into CompareWorkflowConfigMetadataValue with oneOf schemas: AssetMetadata, ClassificationMetadata, CountMetadata, ImageDimensionMetadata, LocationMetadata, OriginalFilenameMetadata, PromptMetadata, SourceUrlMetadata, StreamsMetadata, TextMetadata, TranscriptionMetadata, VideoDurationMetadata. Details: " + ", ".join(error_messages))
  elif match == 0:
  # no match
- raise ValueError("No match found when deserializing the JSON string into CompareWorkflowConfigMetadataValue with oneOf schemas: AssetMetadata, ClassificationMetadata, CountMetadata, ImageDimensionMetadata, LocationMetadata, OriginalFilenameMetadata, PromptMetadata, SourceUrlMetadata, TextMetadata, TranscriptionMetadata. Details: " + ", ".join(error_messages))
+ raise ValueError("No match found when deserializing the JSON string into CompareWorkflowConfigMetadataValue with oneOf schemas: AssetMetadata, ClassificationMetadata, CountMetadata, ImageDimensionMetadata, LocationMetadata, OriginalFilenameMetadata, PromptMetadata, SourceUrlMetadata, StreamsMetadata, TextMetadata, TranscriptionMetadata, VideoDurationMetadata. Details: " + ", ".join(error_messages))
  else:
  return instance

@@ -233,7 +261,7 @@ class CompareWorkflowConfigMetadataValue(BaseModel):
  else:
  return json.dumps(self.actual_instance)

- def to_dict(self) -> Optional[Union[Dict[str, Any], AssetMetadata, ClassificationMetadata, CountMetadata, ImageDimensionMetadata, LocationMetadata, OriginalFilenameMetadata, PromptMetadata, SourceUrlMetadata, TextMetadata, TranscriptionMetadata]]:
+ def to_dict(self) -> Optional[Union[Dict[str, Any], AssetMetadata, ClassificationMetadata, CountMetadata, ImageDimensionMetadata, LocationMetadata, OriginalFilenameMetadata, PromptMetadata, SourceUrlMetadata, StreamsMetadata, TextMetadata, TranscriptionMetadata, VideoDurationMetadata]]:
  """Returns the dict representation of the actual instance"""
  if self.actual_instance is None:
  return None
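The practical consequence is that metadata payloads whose JSON matches the new StreamsMetadata or VideoDurationMetadata models (their definitions are among the added files but not shown in this excerpt) now deserialize through the wrapper. A hedged sketch of the dispatch, using only the wrapper API shown above:

    from rapidata.api_client.models.compare_workflow_config_metadata_value import (
        CompareWorkflowConfigMetadataValue,
    )
    from rapidata.api_client.models.video_duration_metadata import VideoDurationMetadata

    def classify_metadata(raw_json: str) -> str:
        """Return a label for whichever metadata variant the wrapper matched.

        raw_json is assumed to be a single metadata object as returned by the API;
        its exact shape depends on schemas not shown in this diff.
        """
        value = CompareWorkflowConfigMetadataValue.from_json(raw_json)
        if isinstance(value.actual_instance, VideoDurationMetadata):
            return "video duration"
        return type(value.actual_instance).__name__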
rapidata/api_client/models/datapoint_model_paged_result.py (new file)
@@ -0,0 +1,105 @@
+ # coding: utf-8
+
+ """
+ Rapidata.Dataset
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+ The version of the OpenAPI document: v1
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+ """ # noqa: E501
+
+
+ from __future__ import annotations
+ import pprint
+ import re # noqa: F401
+ import json
+
+ from pydantic import BaseModel, ConfigDict, Field, StrictInt
+ from typing import Any, ClassVar, Dict, List, Optional
+ from rapidata.api_client.models.datapoint_model import DatapointModel
+ from typing import Optional, Set
+ from typing_extensions import Self
+
+ class DatapointModelPagedResult(BaseModel):
+ """
+ DatapointModelPagedResult
+ """ # noqa: E501
+ total: StrictInt
+ page: StrictInt
+ page_size: StrictInt = Field(alias="pageSize")
+ items: List[DatapointModel]
+ total_pages: Optional[StrictInt] = Field(default=None, alias="totalPages")
+ __properties: ClassVar[List[str]] = ["total", "page", "pageSize", "items", "totalPages"]
+
+ model_config = ConfigDict(
+ populate_by_name=True,
+ validate_assignment=True,
+ protected_namespaces=(),
+ )
+
+
+ def to_str(self) -> str:
+ """Returns the string representation of the model using alias"""
+ return pprint.pformat(self.model_dump(by_alias=True))
+
+ def to_json(self) -> str:
+ """Returns the JSON representation of the model using alias"""
+ # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+ return json.dumps(self.to_dict())
+
+ @classmethod
+ def from_json(cls, json_str: str) -> Optional[Self]:
+ """Create an instance of DatapointModelPagedResult from a JSON string"""
+ return cls.from_dict(json.loads(json_str))
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Return the dictionary representation of the model using alias.
+
+ This has the following differences from calling pydantic's
+ `self.model_dump(by_alias=True)`:
+
+ * `None` is only added to the output dict for nullable fields that
+ were set at model initialization. Other fields with value `None`
+ are ignored.
+ * OpenAPI `readOnly` fields are excluded.
+ """
+ excluded_fields: Set[str] = set([
+ "total_pages",
+ ])
+
+ _dict = self.model_dump(
+ by_alias=True,
+ exclude=excluded_fields,
+ exclude_none=True,
+ )
+ # override the default output from pydantic by calling `to_dict()` of each item in items (list)
+ _items = []
+ if self.items:
+ for _item_items in self.items:
+ if _item_items:
+ _items.append(_item_items.to_dict())
+ _dict['items'] = _items
+ return _dict
+
+ @classmethod
+ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+ """Create an instance of DatapointModelPagedResult from a dict"""
+ if obj is None:
+ return None
+
+ if not isinstance(obj, dict):
+ return cls.model_validate(obj)
+
+ _obj = cls.model_validate({
+ "total": obj.get("total"),
+ "page": obj.get("page"),
+ "pageSize": obj.get("pageSize"),
+ "items": [DatapointModel.from_dict(_item) for _item in obj["items"]] if obj.get("items") is not None else None,
+ "totalPages": obj.get("totalPages")
+ })
+ return _obj
+
+
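One detail worth noting in the paged result: totalPages is treated as an OpenAPI readOnly field, so it survives from_dict but is stripped again by to_dict. A minimal sketch (an empty items list keeps the example independent of the DatapointModel schema, and the numbers are illustrative):

    from rapidata.api_client.models.datapoint_model_paged_result import DatapointModelPagedResult

    page = DatapointModelPagedResult.from_dict(
        {"total": 0, "page": 1, "pageSize": 50, "items": [], "totalPages": 0}
    )
    assert page.total_pages == 0               # populated from the incoming dict
    assert "totalPages" not in page.to_dict()  # excluded again as a readOnly field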
rapidata/api_client/models/effort_capped_selection.py (new file)
@@ -0,0 +1,115 @@
+ # coding: utf-8
+
+ """
+ Rapidata.Dataset
+
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
+
+ The version of the OpenAPI document: v1
+ Generated by OpenAPI Generator (https://openapi-generator.tech)
+
+ Do not edit the class manually.
+ """ # noqa: E501
+
+
+ from __future__ import annotations
+ import pprint
+ import re # noqa: F401
+ import json
+
+ from pydantic import BaseModel, ConfigDict, Field, StrictInt, StrictStr, field_validator
+ from typing import Any, ClassVar, Dict, List, Optional
+ from typing import Optional, Set
+ from typing_extensions import Self
+
+ class EffortCappedSelection(BaseModel):
+ """
+ EffortCappedSelection
+ """ # noqa: E501
+ t: StrictStr = Field(description="Discriminator value for EffortCappedSelection", alias="_t")
+ effort_budget: StrictInt = Field(alias="effortBudget")
+ retrieval_mode: Optional[StrictStr] = Field(default=None, alias="retrievalMode")
+ max_iterations: Optional[StrictInt] = Field(default=None, alias="maxIterations")
+ __properties: ClassVar[List[str]] = ["_t", "effortBudget", "retrievalMode", "maxIterations"]
+
+ @field_validator('t')
+ def t_validate_enum(cls, value):
+ """Validates the enum"""
+ if value not in set(['EffortCappedSelection']):
+ raise ValueError("must be one of enum values ('EffortCappedSelection')")
+ return value
+
+ @field_validator('retrieval_mode')
+ def retrieval_mode_validate_enum(cls, value):
+ """Validates the enum"""
+ if value is None:
+ return value
+
+ if value not in set(['Random', 'Shuffled', 'Sequential']):
+ raise ValueError("must be one of enum values ('Random', 'Shuffled', 'Sequential')")
+ return value
+
+ model_config = ConfigDict(
+ populate_by_name=True,
+ validate_assignment=True,
+ protected_namespaces=(),
+ )
+
+
+ def to_str(self) -> str:
+ """Returns the string representation of the model using alias"""
+ return pprint.pformat(self.model_dump(by_alias=True))
+
+ def to_json(self) -> str:
+ """Returns the JSON representation of the model using alias"""
+ # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
+ return json.dumps(self.to_dict())
+
+ @classmethod
+ def from_json(cls, json_str: str) -> Optional[Self]:
+ """Create an instance of EffortCappedSelection from a JSON string"""
+ return cls.from_dict(json.loads(json_str))
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Return the dictionary representation of the model using alias.
+
+ This has the following differences from calling pydantic's
+ `self.model_dump(by_alias=True)`:
+
+ * `None` is only added to the output dict for nullable fields that
+ were set at model initialization. Other fields with value `None`
+ are ignored.
+ """
+ excluded_fields: Set[str] = set([
+ ])
+
+ _dict = self.model_dump(
+ by_alias=True,
+ exclude=excluded_fields,
+ exclude_none=True,
+ )
+ # set to None if max_iterations (nullable) is None
+ # and model_fields_set contains the field
+ if self.max_iterations is None and "max_iterations" in self.model_fields_set:
+ _dict['maxIterations'] = None
+
+ return _dict
+
+ @classmethod
+ def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
+ """Create an instance of EffortCappedSelection from a dict"""
+ if obj is None:
+ return None
+
+ if not isinstance(obj, dict):
+ return cls.model_validate(obj)
+
+ _obj = cls.model_validate({
+ "_t": obj.get("_t") if obj.get("_t") is not None else 'EffortCappedSelection',
+ "effortBudget": obj.get("effortBudget"),
+ "retrievalMode": obj.get("retrievalMode"),
+ "maxIterations": obj.get("maxIterations")
+ })
+ return _obj
+
+
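Finally, the discriminator handling in EffortCappedSelection means the _t value can be omitted when building from a dict; from_dict fills it in. A short sketch (field values are illustrative):

    from rapidata.api_client.models.effort_capped_selection import EffortCappedSelection

    # _t is defaulted to 'EffortCappedSelection' by from_dict when missing.
    selection = EffortCappedSelection.from_dict({"effortBudget": 25, "retrievalMode": "Random"})
    assert selection.t == "EffortCappedSelection"
    assert selection.retrieval_mode == "Random"

    # Direct construction also works (populate_by_name=True allows python field names).
    same = EffortCappedSelection(t="EffortCappedSelection", effort_budget=25, retrieval_mode="Random")
    print(same.to_json())  # JSON with "_t", "effortBudget" and "retrievalMode" keys; unset fields are dropped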