orca-sdk 0.0.92__py3-none-any.whl → 0.0.94__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. orca_sdk/_generated_api_client/api/__init__.py +8 -0
  2. orca_sdk/_generated_api_client/api/datasource/download_datasource_datasource_name_or_id_download_get.py +148 -0
  3. orca_sdk/_generated_api_client/api/memoryset/suggest_cascading_edits_memoryset_name_or_id_memory_memory_id_cascading_edits_post.py +233 -0
  4. orca_sdk/_generated_api_client/api/task/list_tasks_task_get.py +60 -10
  5. orca_sdk/_generated_api_client/api/telemetry/count_predictions_telemetry_prediction_count_post.py +10 -10
  6. orca_sdk/_generated_api_client/models/__init__.py +10 -0
  7. orca_sdk/_generated_api_client/models/cascade_edit_suggestions_request.py +154 -0
  8. orca_sdk/_generated_api_client/models/cascading_edit_suggestion.py +92 -0
  9. orca_sdk/_generated_api_client/models/classification_evaluation_result.py +62 -0
  10. orca_sdk/_generated_api_client/models/count_predictions_request.py +195 -0
  11. orca_sdk/_generated_api_client/models/filter_item_field_type_0_item.py +1 -0
  12. orca_sdk/_generated_api_client/models/http_validation_error.py +86 -0
  13. orca_sdk/_generated_api_client/models/label_prediction_memory_lookup.py +8 -0
  14. orca_sdk/_generated_api_client/models/labeled_memory.py +8 -0
  15. orca_sdk/_generated_api_client/models/labeled_memory_lookup.py +8 -0
  16. orca_sdk/_generated_api_client/models/labeled_memory_with_feedback_metrics.py +8 -0
  17. orca_sdk/_generated_api_client/models/list_predictions_request.py +62 -0
  18. orca_sdk/_generated_api_client/models/memoryset_analysis_configs.py +0 -20
  19. orca_sdk/_generated_api_client/models/prediction_request.py +16 -7
  20. orca_sdk/_generated_api_client/models/pretrained_embedding_model_name.py +5 -0
  21. orca_sdk/_generated_api_client/models/validation_error.py +99 -0
  22. orca_sdk/_utils/data_parsing.py +31 -2
  23. orca_sdk/_utils/data_parsing_test.py +18 -15
  24. orca_sdk/_utils/tqdm_file_reader.py +12 -0
  25. orca_sdk/classification_model.py +32 -12
  26. orca_sdk/classification_model_test.py +95 -34
  27. orca_sdk/conftest.py +87 -25
  28. orca_sdk/datasource.py +56 -12
  29. orca_sdk/datasource_test.py +9 -0
  30. orca_sdk/embedding_model_test.py +6 -5
  31. orca_sdk/memoryset.py +78 -0
  32. orca_sdk/memoryset_test.py +199 -123
  33. orca_sdk/telemetry.py +5 -3
  34. {orca_sdk-0.0.92.dist-info → orca_sdk-0.0.94.dist-info}/METADATA +1 -1
  35. {orca_sdk-0.0.92.dist-info → orca_sdk-0.0.94.dist-info}/RECORD +36 -28
  36. {orca_sdk-0.0.92.dist-info → orca_sdk-0.0.94.dist-info}/WHEEL +0 -0
@@ -6,12 +6,15 @@ from .api_key_metadata_scope_item import ApiKeyMetadataScopeItem
6
6
  from .base_label_prediction_result import BaseLabelPredictionResult
7
7
  from .base_model import BaseModel
8
8
  from .body_create_datasource_datasource_post import BodyCreateDatasourceDatasourcePost
9
+ from .cascade_edit_suggestions_request import CascadeEditSuggestionsRequest
10
+ from .cascading_edit_suggestion import CascadingEditSuggestion
9
11
  from .classification_evaluation_result import ClassificationEvaluationResult
10
12
  from .clone_labeled_memoryset_request import CloneLabeledMemorysetRequest
11
13
  from .cluster_metrics import ClusterMetrics
12
14
  from .column_info import ColumnInfo
13
15
  from .column_type import ColumnType
14
16
  from .constraint_violation_error_response import ConstraintViolationErrorResponse
17
+ from .count_predictions_request import CountPredictionsRequest
15
18
  from .create_api_key_request import CreateApiKeyRequest
16
19
  from .create_api_key_request_scope_item import CreateApiKeyRequestScopeItem
17
20
  from .create_api_key_response import CreateApiKeyResponse
@@ -39,6 +42,7 @@ from .finetune_embedding_model_request import FinetuneEmbeddingModelRequest
39
42
  from .finetune_embedding_model_request_training_args import FinetuneEmbeddingModelRequestTrainingArgs
40
43
  from .finetuned_embedding_model_metadata import FinetunedEmbeddingModelMetadata
41
44
  from .get_memories_request import GetMemoriesRequest
45
+ from .http_validation_error import HTTPValidationError
42
46
  from .internal_server_error_response import InternalServerErrorResponse
43
47
  from .label_class_metrics import LabelClassMetrics
44
48
  from .label_prediction_memory_lookup import LabelPredictionMemoryLookup
@@ -110,6 +114,7 @@ from .telemetry_sort_options_direction import TelemetrySortOptionsDirection
110
114
  from .unauthenticated_error_response import UnauthenticatedErrorResponse
111
115
  from .unauthorized_error_response import UnauthorizedErrorResponse
112
116
  from .update_prediction_request import UpdatePredictionRequest
117
+ from .validation_error import ValidationError
113
118
 
114
119
  __all__ = (
115
120
  "AnalyzeNeighborLabelsResult",
@@ -118,12 +123,15 @@ __all__ = (
118
123
  "BaseLabelPredictionResult",
119
124
  "BaseModel",
120
125
  "BodyCreateDatasourceDatasourcePost",
126
+ "CascadeEditSuggestionsRequest",
127
+ "CascadingEditSuggestion",
121
128
  "ClassificationEvaluationResult",
122
129
  "CloneLabeledMemorysetRequest",
123
130
  "ClusterMetrics",
124
131
  "ColumnInfo",
125
132
  "ColumnType",
126
133
  "ConstraintViolationErrorResponse",
134
+ "CountPredictionsRequest",
127
135
  "CreateApiKeyRequest",
128
136
  "CreateApiKeyRequestScopeItem",
129
137
  "CreateApiKeyResponse",
@@ -151,6 +159,7 @@ __all__ = (
151
159
  "FinetuneEmbeddingModelRequest",
152
160
  "FinetuneEmbeddingModelRequestTrainingArgs",
153
161
  "GetMemoriesRequest",
162
+ "HTTPValidationError",
154
163
  "InternalServerErrorResponse",
155
164
  "LabelClassMetrics",
156
165
  "LabeledMemory",
@@ -222,4 +231,5 @@ __all__ = (
222
231
  "UnauthenticatedErrorResponse",
223
232
  "UnauthorizedErrorResponse",
224
233
  "UpdatePredictionRequest",
234
+ "ValidationError",
225
235
  )
"""
This file is generated by the openapi-python-client tool via the generate_api_client.py script

It is a customized template from the openapi-python-client tool's default template:
https://github.com/openapi-generators/openapi-python-client/blob/861ef5622f10fc96d240dc9becb0edf94e61446c/openapi_python_client/templates/model.py.jinja

The main change is:
- Fix typing issues
"""

# flake8: noqa: C901

from typing import Any, Type, TypeVar, Union, cast

from attrs import define as _attrs_define
from attrs import field as _attrs_field

from ..types import UNSET, Unset

T = TypeVar("T", bound="CascadeEditSuggestionsRequest")


@_attrs_define
class CascadeEditSuggestionsRequest:
    """Request model for the cascading-edit-suggestions endpoint.

    Optional fields default to either a concrete value or the ``UNSET``
    sentinel; ``to_dict`` omits any field that is still ``UNSET``.

    Attributes:
        old_label (int):
        new_label (int):
        max_neighbors (Union[Unset, int]): Default: 50.
        max_validation_neighbors (Union[Unset, int]): Default: 10.
        similarity_threshold (Union[None, Unset, float]):
        only_if_has_old_label (Union[Unset, bool]): Default: True.
        exclude_if_new_label (Union[Unset, bool]): Default: True.
        suggestion_cooldown_time (Union[Unset, float]): Default: 86400.0.
        label_confirmation_cooldown_time (Union[Unset, float]): Default: 604800.0.
    """

    old_label: int
    new_label: int
    max_neighbors: Union[Unset, int] = 50
    max_validation_neighbors: Union[Unset, int] = 10
    similarity_threshold: Union[None, Unset, float] = UNSET
    only_if_has_old_label: Union[Unset, bool] = True
    exclude_if_new_label: Union[Unset, bool] = True
    suggestion_cooldown_time: Union[Unset, float] = 86400.0
    label_confirmation_cooldown_time: Union[Unset, float] = 604800.0
    # Extra keys not declared above; merged into the serialized dict.
    additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a JSON-ready dict; fields left UNSET are omitted."""
        old_label = self.old_label

        new_label = self.new_label

        max_neighbors = self.max_neighbors

        max_validation_neighbors = self.max_validation_neighbors

        # UNSET passes through as the sentinel so it can be filtered below;
        # None and floats are emitted as-is.
        similarity_threshold: Union[None, Unset, float]
        if isinstance(self.similarity_threshold, Unset):
            similarity_threshold = UNSET
        else:
            similarity_threshold = self.similarity_threshold

        only_if_has_old_label = self.only_if_has_old_label

        exclude_if_new_label = self.exclude_if_new_label

        suggestion_cooldown_time = self.suggestion_cooldown_time

        label_confirmation_cooldown_time = self.label_confirmation_cooldown_time

        # Additional properties go in first so the declared fields win on
        # any key collision.
        field_dict: dict[str, Any] = {}
        field_dict.update(self.additional_properties)
        field_dict.update(
            {
                "old_label": old_label,
                "new_label": new_label,
            }
        )
        # Only include optional fields that were explicitly set.
        if max_neighbors is not UNSET:
            field_dict["max_neighbors"] = max_neighbors
        if max_validation_neighbors is not UNSET:
            field_dict["max_validation_neighbors"] = max_validation_neighbors
        if similarity_threshold is not UNSET:
            field_dict["similarity_threshold"] = similarity_threshold
        if only_if_has_old_label is not UNSET:
            field_dict["only_if_has_old_label"] = only_if_has_old_label
        if exclude_if_new_label is not UNSET:
            field_dict["exclude_if_new_label"] = exclude_if_new_label
        if suggestion_cooldown_time is not UNSET:
            field_dict["suggestion_cooldown_time"] = suggestion_cooldown_time
        if label_confirmation_cooldown_time is not UNSET:
            field_dict["label_confirmation_cooldown_time"] = label_confirmation_cooldown_time

        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: dict[str, Any]) -> T:
        """Build an instance from a dict.

        Required keys (``old_label``, ``new_label``) raise KeyError if
        missing; optional keys fall back to UNSET. Keys left over after
        popping the declared fields are kept in ``additional_properties``.
        """
        d = src_dict.copy()
        old_label = d.pop("old_label")

        new_label = d.pop("new_label")

        max_neighbors = d.pop("max_neighbors", UNSET)

        max_validation_neighbors = d.pop("max_validation_neighbors", UNSET)

        # None and the UNSET sentinel pass through untouched; anything else
        # is treated as a float.
        def _parse_similarity_threshold(data: object) -> Union[None, Unset, float]:
            if data is None:
                return data
            if isinstance(data, Unset):
                return data
            return cast(Union[None, Unset, float], data)

        similarity_threshold = _parse_similarity_threshold(d.pop("similarity_threshold", UNSET))

        only_if_has_old_label = d.pop("only_if_has_old_label", UNSET)

        exclude_if_new_label = d.pop("exclude_if_new_label", UNSET)

        suggestion_cooldown_time = d.pop("suggestion_cooldown_time", UNSET)

        label_confirmation_cooldown_time = d.pop("label_confirmation_cooldown_time", UNSET)

        cascade_edit_suggestions_request = cls(
            old_label=old_label,
            new_label=new_label,
            max_neighbors=max_neighbors,
            max_validation_neighbors=max_validation_neighbors,
            similarity_threshold=similarity_threshold,
            only_if_has_old_label=only_if_has_old_label,
            exclude_if_new_label=exclude_if_new_label,
            suggestion_cooldown_time=suggestion_cooldown_time,
            label_confirmation_cooldown_time=label_confirmation_cooldown_time,
        )

        # Remaining keys were not declared fields; preserve them.
        cascade_edit_suggestions_request.additional_properties = d
        return cascade_edit_suggestions_request

    @property
    def additional_keys(self) -> list[str]:
        """Names of the undeclared extra properties currently held."""
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
"""
This file is generated by the openapi-python-client tool via the generate_api_client.py script

It is a customized template from the openapi-python-client tool's default template:
https://github.com/openapi-generators/openapi-python-client/blob/861ef5622f10fc96d240dc9becb0edf94e61446c/openapi_python_client/templates/model.py.jinja

The main change is:
- Fix typing issues
"""

# flake8: noqa: C901

from typing import TYPE_CHECKING, Any, Type, TypeVar

from attrs import define as _attrs_define
from attrs import field as _attrs_field

if TYPE_CHECKING:
    from ..models.labeled_memory_lookup import LabeledMemoryLookup


T = TypeVar("T", bound="CascadingEditSuggestion")


@_attrs_define
class CascadingEditSuggestion:
    """Pairs a neighboring memory lookup with a suggested label and score.

    Attributes:
        neighbor (LabeledMemoryLookup): Single labeled memory lookup result.
        suggested_label (int):
        lookup_score (float):
    """

    neighbor: "LabeledMemoryLookup"
    suggested_label: int
    lookup_score: float
    # Undeclared extra keys carried alongside the declared fields.
    additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a JSON-ready dict, nesting the neighbor's own dict."""
        # Start from the extra properties, then overlay the declared fields
        # so declared values win on any key collision.
        serialized: dict[str, Any] = dict(self.additional_properties)
        serialized.update(
            {
                "neighbor": self.neighbor.to_dict(),
                "suggested_label": self.suggested_label,
                "lookup_score": self.lookup_score,
            }
        )

        return serialized

    @classmethod
    def from_dict(cls: Type[T], src_dict: dict[str, Any]) -> T:
        """Build an instance from a dict; leftover keys become extras."""
        # Imported at call time: the module-level import only exists under
        # TYPE_CHECKING.
        from ..models.labeled_memory_lookup import LabeledMemoryLookup

        payload = src_dict.copy()
        suggestion = cls(
            neighbor=LabeledMemoryLookup.from_dict(payload.pop("neighbor")),
            suggested_label=payload.pop("suggested_label"),
            lookup_score=payload.pop("lookup_score"),
        )

        # Keys remaining after the pops are preserved as extras.
        suggestion.additional_properties = payload
        return suggestion

    @property
    def additional_keys(self) -> list[str]:
        """Names of the undeclared extra properties currently held."""
        return list(self.additional_properties)

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
@@ -15,6 +15,8 @@ from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar, Union, cast
15
15
  from attrs import define as _attrs_define
16
16
  from attrs import field as _attrs_field
17
17
 
18
+ from ..types import UNSET, Unset
19
+
18
20
  if TYPE_CHECKING:
19
21
  from ..models.precision_recall_curve import PrecisionRecallCurve
20
22
  from ..models.roc_curve import ROCCurve
@@ -32,6 +34,9 @@ class ClassificationEvaluationResult:
32
34
  loss (float):
33
35
  precision_recall_curve (Union['PrecisionRecallCurve', None]):
34
36
  roc_curve (Union['ROCCurve', None]):
37
+ anomaly_score_mean (Union[None, Unset, float]):
38
+ anomaly_score_median (Union[None, Unset, float]):
39
+ anomaly_score_variance (Union[None, Unset, float]):
35
40
  """
36
41
 
37
42
  f1_score: float
@@ -39,6 +44,9 @@ class ClassificationEvaluationResult:
39
44
  loss: float
40
45
  precision_recall_curve: Union["PrecisionRecallCurve", None]
41
46
  roc_curve: Union["ROCCurve", None]
47
+ anomaly_score_mean: Union[None, Unset, float] = UNSET
48
+ anomaly_score_median: Union[None, Unset, float] = UNSET
49
+ anomaly_score_variance: Union[None, Unset, float] = UNSET
42
50
  additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)
43
51
 
44
52
  def to_dict(self) -> dict[str, Any]:
@@ -63,6 +71,24 @@ class ClassificationEvaluationResult:
63
71
  else:
64
72
  roc_curve = self.roc_curve
65
73
 
74
+ anomaly_score_mean: Union[None, Unset, float]
75
+ if isinstance(self.anomaly_score_mean, Unset):
76
+ anomaly_score_mean = UNSET
77
+ else:
78
+ anomaly_score_mean = self.anomaly_score_mean
79
+
80
+ anomaly_score_median: Union[None, Unset, float]
81
+ if isinstance(self.anomaly_score_median, Unset):
82
+ anomaly_score_median = UNSET
83
+ else:
84
+ anomaly_score_median = self.anomaly_score_median
85
+
86
+ anomaly_score_variance: Union[None, Unset, float]
87
+ if isinstance(self.anomaly_score_variance, Unset):
88
+ anomaly_score_variance = UNSET
89
+ else:
90
+ anomaly_score_variance = self.anomaly_score_variance
91
+
66
92
  field_dict: dict[str, Any] = {}
67
93
  field_dict.update(self.additional_properties)
68
94
  field_dict.update(
@@ -74,6 +100,12 @@ class ClassificationEvaluationResult:
74
100
  "roc_curve": roc_curve,
75
101
  }
76
102
  )
103
+ if anomaly_score_mean is not UNSET:
104
+ field_dict["anomaly_score_mean"] = anomaly_score_mean
105
+ if anomaly_score_median is not UNSET:
106
+ field_dict["anomaly_score_median"] = anomaly_score_median
107
+ if anomaly_score_variance is not UNSET:
108
+ field_dict["anomaly_score_variance"] = anomaly_score_variance
77
109
 
78
110
  return field_dict
79
111
 
@@ -119,12 +151,42 @@ class ClassificationEvaluationResult:
119
151
 
120
152
  roc_curve = _parse_roc_curve(d.pop("roc_curve"))
121
153
 
154
+ def _parse_anomaly_score_mean(data: object) -> Union[None, Unset, float]:
155
+ if data is None:
156
+ return data
157
+ if isinstance(data, Unset):
158
+ return data
159
+ return cast(Union[None, Unset, float], data)
160
+
161
+ anomaly_score_mean = _parse_anomaly_score_mean(d.pop("anomaly_score_mean", UNSET))
162
+
163
+ def _parse_anomaly_score_median(data: object) -> Union[None, Unset, float]:
164
+ if data is None:
165
+ return data
166
+ if isinstance(data, Unset):
167
+ return data
168
+ return cast(Union[None, Unset, float], data)
169
+
170
+ anomaly_score_median = _parse_anomaly_score_median(d.pop("anomaly_score_median", UNSET))
171
+
172
+ def _parse_anomaly_score_variance(data: object) -> Union[None, Unset, float]:
173
+ if data is None:
174
+ return data
175
+ if isinstance(data, Unset):
176
+ return data
177
+ return cast(Union[None, Unset, float], data)
178
+
179
+ anomaly_score_variance = _parse_anomaly_score_variance(d.pop("anomaly_score_variance", UNSET))
180
+
122
181
  classification_evaluation_result = cls(
123
182
  f1_score=f1_score,
124
183
  accuracy=accuracy,
125
184
  loss=loss,
126
185
  precision_recall_curve=precision_recall_curve,
127
186
  roc_curve=roc_curve,
187
+ anomaly_score_mean=anomaly_score_mean,
188
+ anomaly_score_median=anomaly_score_median,
189
+ anomaly_score_variance=anomaly_score_variance,
128
190
  )
129
191
 
130
192
  classification_evaluation_result.additional_properties = d
"""
This file is generated by the openapi-python-client tool via the generate_api_client.py script

It is a customized template from the openapi-python-client tool's default template:
https://github.com/openapi-generators/openapi-python-client/blob/861ef5622f10fc96d240dc9becb0edf94e61446c/openapi_python_client/templates/model.py.jinja

The main change is:
- Fix typing issues
"""

# flake8: noqa: C901

import datetime
from typing import Any, List, Type, TypeVar, Union, cast

from attrs import define as _attrs_define
from attrs import field as _attrs_field
from dateutil.parser import isoparse

from ..types import UNSET, Unset

T = TypeVar("T", bound="CountPredictionsRequest")


@_attrs_define
class CountPredictionsRequest:
    """Request model for counting predictions, with every filter optional.

    All fields default to the ``UNSET`` sentinel; ``to_dict`` omits any
    field that is still ``UNSET``.

    Attributes:
        model_id (Union[None, Unset, str]):
        tag (Union[None, Unset, str]):
        prediction_ids (Union[List[str], None, Unset]):
        start_timestamp (Union[None, Unset, datetime.datetime]):
        end_timestamp (Union[None, Unset, datetime.datetime]):
    """

    model_id: Union[None, Unset, str] = UNSET
    tag: Union[None, Unset, str] = UNSET
    prediction_ids: Union[List[str], None, Unset] = UNSET
    start_timestamp: Union[None, Unset, datetime.datetime] = UNSET
    end_timestamp: Union[None, Unset, datetime.datetime] = UNSET
    # Extra keys not declared above; merged into the serialized dict.
    additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a JSON-ready dict; fields left UNSET are omitted.

        Datetimes are converted to ISO-8601 strings.
        """
        model_id: Union[None, Unset, str]
        if isinstance(self.model_id, Unset):
            model_id = UNSET
        else:
            model_id = self.model_id

        tag: Union[None, Unset, str]
        if isinstance(self.tag, Unset):
            tag = UNSET
        else:
            tag = self.tag

        # Both non-Unset branches pass the value through unchanged (a list
        # of strings is already JSON-ready, and so is None).
        prediction_ids: Union[List[str], None, Unset]
        if isinstance(self.prediction_ids, Unset):
            prediction_ids = UNSET
        elif isinstance(self.prediction_ids, list):
            prediction_ids = self.prediction_ids

        else:
            prediction_ids = self.prediction_ids

        start_timestamp: Union[None, Unset, str]
        if isinstance(self.start_timestamp, Unset):
            start_timestamp = UNSET
        elif isinstance(self.start_timestamp, datetime.datetime):
            start_timestamp = self.start_timestamp.isoformat()
        else:
            start_timestamp = self.start_timestamp

        end_timestamp: Union[None, Unset, str]
        if isinstance(self.end_timestamp, Unset):
            end_timestamp = UNSET
        elif isinstance(self.end_timestamp, datetime.datetime):
            end_timestamp = self.end_timestamp.isoformat()
        else:
            end_timestamp = self.end_timestamp

        field_dict: dict[str, Any] = {}
        field_dict.update(self.additional_properties)
        # No always-present fields: everything below is conditional.
        field_dict.update({})
        if model_id is not UNSET:
            field_dict["model_id"] = model_id
        if tag is not UNSET:
            field_dict["tag"] = tag
        if prediction_ids is not UNSET:
            field_dict["prediction_ids"] = prediction_ids
        if start_timestamp is not UNSET:
            field_dict["start_timestamp"] = start_timestamp
        if end_timestamp is not UNSET:
            field_dict["end_timestamp"] = end_timestamp

        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: dict[str, Any]) -> T:
        """Build an instance from a dict.

        Missing keys fall back to UNSET; timestamp strings are parsed back
        into datetimes. Keys left over after popping the declared fields
        are kept in ``additional_properties``.
        """
        d = src_dict.copy()

        # None and the UNSET sentinel pass through untouched.
        def _parse_model_id(data: object) -> Union[None, Unset, str]:
            if data is None:
                return data
            if isinstance(data, Unset):
                return data
            return cast(Union[None, Unset, str], data)

        model_id = _parse_model_id(d.pop("model_id", UNSET))

        def _parse_tag(data: object) -> Union[None, Unset, str]:
            if data is None:
                return data
            if isinstance(data, Unset):
                return data
            return cast(Union[None, Unset, str], data)

        tag = _parse_tag(d.pop("tag", UNSET))

        def _parse_prediction_ids(data: object) -> Union[List[str], None, Unset]:
            if data is None:
                return data
            if isinstance(data, Unset):
                return data
            try:
                if not isinstance(data, list):
                    raise TypeError()
                prediction_ids_type_0 = cast(List[str], data)

                return prediction_ids_type_0
            except:  # noqa: E722
                pass
            # Non-list fallback: returned as-is under the union type.
            return cast(Union[List[str], None, Unset], data)

        prediction_ids = _parse_prediction_ids(d.pop("prediction_ids", UNSET))

        # Try ISO-8601 parsing; on failure the raw value is returned as-is.
        def _parse_start_timestamp(data: object) -> Union[None, Unset, datetime.datetime]:
            if data is None:
                return data
            if isinstance(data, Unset):
                return data
            try:
                if not isinstance(data, str):
                    raise TypeError()
                start_timestamp_type_0 = isoparse(data)

                return start_timestamp_type_0
            except:  # noqa: E722
                pass
            return cast(Union[None, Unset, datetime.datetime], data)

        start_timestamp = _parse_start_timestamp(d.pop("start_timestamp", UNSET))

        def _parse_end_timestamp(data: object) -> Union[None, Unset, datetime.datetime]:
            if data is None:
                return data
            if isinstance(data, Unset):
                return data
            try:
                if not isinstance(data, str):
                    raise TypeError()
                end_timestamp_type_0 = isoparse(data)

                return end_timestamp_type_0
            except:  # noqa: E722
                pass
            return cast(Union[None, Unset, datetime.datetime], data)

        end_timestamp = _parse_end_timestamp(d.pop("end_timestamp", UNSET))

        count_predictions_request = cls(
            model_id=model_id,
            tag=tag,
            prediction_ids=prediction_ids,
            start_timestamp=start_timestamp,
            end_timestamp=end_timestamp,
        )

        # Remaining keys were not declared fields; preserve them.
        count_predictions_request.additional_properties = d
        return count_predictions_request

    @property
    def additional_keys(self) -> list[str]:
        """Names of the undeclared extra properties currently held."""
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
@@ -3,6 +3,7 @@ from enum import Enum
3
3
 
4
4
  class FilterItemFieldType0Item(str, Enum):
5
5
  CREATED_AT = "created_at"
6
+ EDITED_AT = "edited_at"
6
7
  LABEL = "label"
7
8
  MEMORY_ID = "memory_id"
8
9
  METADATA = "metadata"