mixpeek 0.15.0__py3-none-any.whl → 0.15.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. mixpeek/_version.py +1 -1
  2. mixpeek/assets.py +8 -8
  3. mixpeek/collections.py +4 -4
  4. mixpeek/featureextractors.py +6 -6
  5. mixpeek/features.py +20 -20
  6. mixpeek/ingest.py +12 -54
  7. mixpeek/models/__init__.py +27 -108
  8. mixpeek/models/assetresponse.py +4 -4
  9. mixpeek/models/availableindexesresponse.py +2 -2
  10. mixpeek/models/availablemodels.py +4 -0
  11. mixpeek/models/createnamespacerequest.py +4 -4
  12. mixpeek/models/embeddingrequest.py +2 -2
  13. mixpeek/models/entitysettings.py +50 -0
  14. mixpeek/models/featureextractionembeddingrequest.py +2 -2
  15. mixpeek/models/imagedescribesettings.py +6 -6
  16. mixpeek/models/imagereadsettings.py +6 -6
  17. mixpeek/models/imagesettings.py +17 -4
  18. mixpeek/models/listassetsrequest.py +3 -3
  19. mixpeek/models/listfeaturesrequest.py +3 -3
  20. mixpeek/models/{logicaloperator_input.py → logicaloperator.py} +8 -8
  21. mixpeek/models/namespaceresponse.py +2 -2
  22. mixpeek/models/processimageurlinput.py +1 -13
  23. mixpeek/models/processtextinput.py +1 -13
  24. mixpeek/models/processvideourlinput.py +1 -13
  25. mixpeek/models/search_features_features_search_postop.py +4 -7
  26. mixpeek/models/{search_model_searchquery_input.py → search_model_searchquery.py} +7 -7
  27. mixpeek/models/searchassetsrequest.py +3 -3
  28. mixpeek/models/{searchrequestfeatures_output.py → searchrequestfeatures.py} +11 -11
  29. mixpeek/models/{percolaterequest.py → taskresponse.py} +15 -17
  30. mixpeek/models/taskstatus.py +1 -0
  31. mixpeek/models/taxonomyextractionconfig.py +31 -0
  32. mixpeek/models/textsettings.py +10 -4
  33. mixpeek/models/vectormodel.py +4 -0
  34. mixpeek/models/videodescribesettings.py +6 -6
  35. mixpeek/models/videoreadsettings.py +6 -6
  36. mixpeek/models/videosettings.py +17 -3
  37. mixpeek/models/videotranscriptionsettings.py +6 -6
  38. mixpeek/namespaces.py +6 -6
  39. mixpeek/sdk.py +0 -6
  40. mixpeek/sdkconfiguration.py +2 -2
  41. mixpeek/tasks.py +4 -4
  42. {mixpeek-0.15.0.dist-info → mixpeek-0.15.1.dist-info}/METADATA +1 -11
  43. {mixpeek-0.15.0.dist-info → mixpeek-0.15.1.dist-info}/RECORD +44 -56
  44. mixpeek/interactions.py +0 -228
  45. mixpeek/models/create_interaction_features_search_interactions_postop.py +0 -59
  46. mixpeek/models/db_model_taskresponse.py +0 -20
  47. mixpeek/models/delete_interaction_features_search_interactions_interaction_id_deleteop.py +0 -59
  48. mixpeek/models/get_interaction_features_search_interactions_interaction_id_getop.py +0 -59
  49. mixpeek/models/interactionresponse.py +0 -87
  50. mixpeek/models/interactiontype.py +0 -11
  51. mixpeek/models/list_interactions_features_search_interactions_getop.py +0 -96
  52. mixpeek/models/logicaloperator_output.py +0 -103
  53. mixpeek/models/searchinteraction.py +0 -82
  54. mixpeek/models/searchquery_output.py +0 -79
  55. mixpeek/models/searchrequestfeatures_input.py +0 -151
  56. mixpeek/models/tasks_model_taskresponse.py +0 -24
  57. mixpeek/searchinteractions.py +0 -666
  58. {mixpeek-0.15.0.dist-info → mixpeek-0.15.1.dist-info}/WHEEL +0 -0
mixpeek/_version.py CHANGED
@@ -3,7 +3,7 @@
 import importlib.metadata
 
 __title__: str = "mixpeek"
-__version__: str = "0.15.0"
+__version__: str = "0.15.1"
 
 try:
     if __package__ is not None:
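
Since the only change here is the version string, an upgrading consumer can confirm which release is installed with the same importlib.metadata mechanism the module itself relies on; a quick check:

    import importlib.metadata

    # prints "0.15.1" once the new wheel is installed
    print(importlib.metadata.version("mixpeek"))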
mixpeek/assets.py CHANGED
@@ -1075,7 +1075,7 @@ class Assets(BaseSDK):
         page_size: Optional[int] = 10,
         x_namespace: OptionalNullable[str] = UNSET,
         filters: OptionalNullable[
-            Union[models.LogicalOperatorInput, models.LogicalOperatorInputTypedDict]
+            Union[models.LogicalOperator, models.LogicalOperatorTypedDict]
         ] = UNSET,
         group_by: OptionalNullable[
             Union[models.GroupByOptionsAsset, models.GroupByOptionsAssetTypedDict]
@@ -1121,7 +1121,7 @@ class Assets(BaseSDK):
             list_assets_request=models.ListAssetsRequest(
                 collections=collections,
                 filters=utils.get_pydantic_model(
-                    filters, OptionalNullable[models.LogicalOperatorInput]
+                    filters, OptionalNullable[models.LogicalOperator]
                 ),
                 group_by=utils.get_pydantic_model(
                     group_by, OptionalNullable[models.GroupByOptionsAsset]
@@ -1212,7 +1212,7 @@ class Assets(BaseSDK):
         page_size: Optional[int] = 10,
         x_namespace: OptionalNullable[str] = UNSET,
         filters: OptionalNullable[
-            Union[models.LogicalOperatorInput, models.LogicalOperatorInputTypedDict]
+            Union[models.LogicalOperator, models.LogicalOperatorTypedDict]
         ] = UNSET,
         group_by: OptionalNullable[
             Union[models.GroupByOptionsAsset, models.GroupByOptionsAssetTypedDict]
@@ -1258,7 +1258,7 @@ class Assets(BaseSDK):
             list_assets_request=models.ListAssetsRequest(
                 collections=collections,
                 filters=utils.get_pydantic_model(
-                    filters, OptionalNullable[models.LogicalOperatorInput]
+                    filters, OptionalNullable[models.LogicalOperator]
                 ),
                 group_by=utils.get_pydantic_model(
                     group_by, OptionalNullable[models.GroupByOptionsAsset]
@@ -1350,7 +1350,7 @@ class Assets(BaseSDK):
             Union[models.AssetsModelSearchQuery, models.AssetsModelSearchQueryTypedDict]
         ] = UNSET,
         filters: OptionalNullable[
-            Union[models.LogicalOperatorInput, models.LogicalOperatorInputTypedDict]
+            Union[models.LogicalOperator, models.LogicalOperatorTypedDict]
         ] = UNSET,
         sort: OptionalNullable[
             Union[models.SortOption, models.SortOptionTypedDict]
@@ -1392,7 +1392,7 @@ class Assets(BaseSDK):
                 ),
                 collections=collections,
                 filters=utils.get_pydantic_model(
-                    filters, OptionalNullable[models.LogicalOperatorInput]
+                    filters, OptionalNullable[models.LogicalOperator]
                 ),
                 sort=utils.get_pydantic_model(
                     sort, OptionalNullable[models.SortOption]
@@ -1481,7 +1481,7 @@ class Assets(BaseSDK):
             Union[models.AssetsModelSearchQuery, models.AssetsModelSearchQueryTypedDict]
         ] = UNSET,
         filters: OptionalNullable[
-            Union[models.LogicalOperatorInput, models.LogicalOperatorInputTypedDict]
+            Union[models.LogicalOperator, models.LogicalOperatorTypedDict]
         ] = UNSET,
         sort: OptionalNullable[
             Union[models.SortOption, models.SortOptionTypedDict]
@@ -1523,7 +1523,7 @@ class Assets(BaseSDK):
                 ),
                 collections=collections,
                 filters=utils.get_pydantic_model(
-                    filters, OptionalNullable[models.LogicalOperatorInput]
+                    filters, OptionalNullable[models.LogicalOperator]
                 ),
                 sort=utils.get_pydantic_model(
                     sort, OptionalNullable[models.SortOption]
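
For callers, the visible effect of these hunks is that the filters argument on the asset list and search methods is now typed as models.LogicalOperator rather than models.LogicalOperatorInput. A minimal, hedged sketch of updated caller code (the Mixpeek constructor argument and the assets.list method name are assumptions, and LogicalOperator's fields are assumed to carry over unchanged from LogicalOperatorInput):

    from mixpeek import Mixpeek, models

    mp = Mixpeek(token="MIXPEEK_API_KEY")  # auth parameter name assumed

    res = mp.assets.list(                  # method name assumed
        collections=["my_collection"],
        page_size=10,
        # 0.15.0: models.LogicalOperatorInput; 0.15.1: models.LogicalOperator
        filters=None,                      # pass a models.LogicalOperator (or its TypedDict) to filter
    )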
mixpeek/collections.py CHANGED
@@ -442,7 +442,7 @@ class Collections(BaseSDK):
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Any:
+    ) -> models.TaskResponse:
         r"""Delete Collection
 
         Delete a collection using either its name or ID
@@ -506,7 +506,7 @@
 
         data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, Any)
+            return utils.unmarshal_json(http_res.text, models.TaskResponse)
         if utils.match_response(
             http_res, ["400", "401", "403", "404", "500"], "application/json"
         ):
@@ -539,7 +539,7 @@
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> Any:
+    ) -> models.TaskResponse:
         r"""Delete Collection
 
         Delete a collection using either its name or ID
@@ -603,7 +603,7 @@
 
         data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, Any)
+            return utils.unmarshal_json(http_res.text, models.TaskResponse)
         if utils.match_response(
             http_res, ["400", "401", "403", "404", "500"], "application/json"
         ):
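
The return type change means collection deletion now yields a typed models.TaskResponse instead of an untyped Any, so the deletion task can be inspected like any other task object. A hedged sketch (the collections.delete method name and the TaskResponse attributes shown are assumptions; the diff only guarantees the return type):

    from mixpeek import Mixpeek

    mp = Mixpeek(token="MIXPEEK_API_KEY")   # auth parameter name assumed

    task = mp.collections.delete(collection="old_collection")  # method name assumed
    # 0.15.0 returned Any; 0.15.1 returns models.TaskResponse
    print(task.task_id, task.status)        # attribute names assumed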
mixpeek/featureextractors.py CHANGED
@@ -13,7 +13,7 @@ class FeatureExtractors(BaseSDK):
         self,
         *,
         type_: models.InputType,
-        vector_index: models.VectorModel,
+        embedding_model: models.VectorModel,
         value: OptionalNullable[str] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -23,7 +23,7 @@ class FeatureExtractors(BaseSDK):
         r"""Extract Embeddings
 
         :param type:
-        :param vector_index:
+        :param embedding_model:
         :param value: The input content to embed. Could be a URL, text content, file path, or base64 encoded string
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -41,7 +41,7 @@ class FeatureExtractors(BaseSDK):
         request = models.FeatureExtractionEmbeddingRequest(
             type=type_,
             value=value,
-            vector_index=vector_index,
+            embedding_model=embedding_model,
         )
 
         req = self._build_request(
@@ -114,7 +114,7 @@ class FeatureExtractors(BaseSDK):
         self,
         *,
         type_: models.InputType,
-        vector_index: models.VectorModel,
+        embedding_model: models.VectorModel,
         value: OptionalNullable[str] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -124,7 +124,7 @@ class FeatureExtractors(BaseSDK):
         r"""Extract Embeddings
 
         :param type:
-        :param vector_index:
+        :param embedding_model:
         :param value: The input content to embed. Could be a URL, text content, file path, or base64 encoded string
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -142,7 +142,7 @@ class FeatureExtractors(BaseSDK):
         request = models.FeatureExtractionEmbeddingRequest(
             type=type_,
             value=value,
-            vector_index=vector_index,
+            embedding_model=embedding_model,
         )
 
         req = self._build_request_async(
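
The breaking change for callers here is the keyword rename from vector_index to embedding_model; the accepted value is still a models.VectorModel. A hedged sketch (the feature_extractors.extract_embeddings method name, the InputType member, and the model identifier are assumptions; type_, value, and embedding_model come straight from the signature above):

    from mixpeek import Mixpeek, models

    mp = Mixpeek(token="MIXPEEK_API_KEY")              # auth parameter name assumed

    emb = mp.feature_extractors.extract_embeddings(    # method name assumed
        type_=models.InputType.TEXT,                   # enum member assumed
        value="a short passage to embed",
        embedding_model="multimodal-v1",               # was vector_index= in 0.15.0; model name is a placeholder
    )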
mixpeek/features.py CHANGED
@@ -627,7 +627,7 @@ class Features(BaseSDK):
         page_size: Optional[int] = 10,
         x_namespace: OptionalNullable[str] = UNSET,
         filters: OptionalNullable[
-            Union[models.LogicalOperatorInput, models.LogicalOperatorInputTypedDict]
+            Union[models.LogicalOperator, models.LogicalOperatorTypedDict]
         ] = UNSET,
         sort: OptionalNullable[
             Union[models.SortOption, models.SortOptionTypedDict]
@@ -673,7 +673,7 @@ class Features(BaseSDK):
             list_features_request=models.ListFeaturesRequest(
                 collections=collections,
                 filters=utils.get_pydantic_model(
-                    filters, OptionalNullable[models.LogicalOperatorInput]
+                    filters, OptionalNullable[models.LogicalOperator]
                 ),
                 sort=utils.get_pydantic_model(
                     sort, OptionalNullable[models.SortOption]
@@ -761,7 +761,7 @@ class Features(BaseSDK):
         page_size: Optional[int] = 10,
         x_namespace: OptionalNullable[str] = UNSET,
         filters: OptionalNullable[
-            Union[models.LogicalOperatorInput, models.LogicalOperatorInputTypedDict]
+            Union[models.LogicalOperator, models.LogicalOperatorTypedDict]
         ] = UNSET,
         sort: OptionalNullable[
             Union[models.SortOption, models.SortOptionTypedDict]
@@ -807,7 +807,7 @@ class Features(BaseSDK):
             list_features_request=models.ListFeaturesRequest(
                 collections=collections,
                 filters=utils.get_pydantic_model(
-                    filters, OptionalNullable[models.LogicalOperatorInput]
+                    filters, OptionalNullable[models.LogicalOperator]
                 ),
                 sort=utils.get_pydantic_model(
                     sort, OptionalNullable[models.SortOption]
@@ -891,15 +891,15 @@ class Features(BaseSDK):
         self,
         *,
         queries: Union[
-            List[models.SearchModelSearchQueryInput],
-            List[models.SearchModelSearchQueryInputTypedDict],
+            List[models.SearchModelSearchQuery],
+            List[models.SearchModelSearchQueryTypedDict],
         ],
         collections: List[str],
         offset_position: OptionalNullable[int] = UNSET,
         page_size: Optional[int] = 10,
         x_namespace: OptionalNullable[str] = UNSET,
         filters: OptionalNullable[
-            Union[models.LogicalOperatorInput, models.LogicalOperatorInputTypedDict]
+            Union[models.LogicalOperator, models.LogicalOperatorTypedDict]
         ] = UNSET,
         group_by: OptionalNullable[
             Union[models.GroupByOptions, models.GroupByOptionsTypedDict]
@@ -953,13 +953,13 @@ class Features(BaseSDK):
             offset_position=offset_position,
             page_size=page_size,
             x_namespace=x_namespace,
-            search_request_features_input=models.SearchRequestFeaturesInput(
+            search_request_features=models.SearchRequestFeatures(
                 queries=utils.get_pydantic_model(
-                    queries, List[models.SearchModelSearchQueryInput]
+                    queries, List[models.SearchModelSearchQuery]
                 ),
                 collections=collections,
                 filters=utils.get_pydantic_model(
-                    filters, OptionalNullable[models.LogicalOperatorInput]
+                    filters, OptionalNullable[models.LogicalOperator]
                 ),
                 group_by=utils.get_pydantic_model(
                     group_by, OptionalNullable[models.GroupByOptions]
@@ -990,11 +990,11 @@ class Features(BaseSDK):
             http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
-                request.search_request_features_input,
+                request.search_request_features,
                 False,
                 False,
                 "json",
-                models.SearchRequestFeaturesInput,
+                models.SearchRequestFeatures,
             ),
             timeout_ms=timeout_ms,
         )
@@ -1053,15 +1053,15 @@ class Features(BaseSDK):
         self,
         *,
         queries: Union[
-            List[models.SearchModelSearchQueryInput],
-            List[models.SearchModelSearchQueryInputTypedDict],
+            List[models.SearchModelSearchQuery],
+            List[models.SearchModelSearchQueryTypedDict],
         ],
         collections: List[str],
         offset_position: OptionalNullable[int] = UNSET,
         page_size: Optional[int] = 10,
         x_namespace: OptionalNullable[str] = UNSET,
         filters: OptionalNullable[
-            Union[models.LogicalOperatorInput, models.LogicalOperatorInputTypedDict]
+            Union[models.LogicalOperator, models.LogicalOperatorTypedDict]
         ] = UNSET,
         group_by: OptionalNullable[
             Union[models.GroupByOptions, models.GroupByOptionsTypedDict]
@@ -1115,13 +1115,13 @@ class Features(BaseSDK):
             offset_position=offset_position,
             page_size=page_size,
             x_namespace=x_namespace,
-            search_request_features_input=models.SearchRequestFeaturesInput(
+            search_request_features=models.SearchRequestFeatures(
                 queries=utils.get_pydantic_model(
-                    queries, List[models.SearchModelSearchQueryInput]
+                    queries, List[models.SearchModelSearchQuery]
                 ),
                 collections=collections,
                 filters=utils.get_pydantic_model(
-                    filters, OptionalNullable[models.LogicalOperatorInput]
+                    filters, OptionalNullable[models.LogicalOperator]
                 ),
                 group_by=utils.get_pydantic_model(
                     group_by, OptionalNullable[models.GroupByOptions]
@@ -1152,11 +1152,11 @@ class Features(BaseSDK):
             http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
-                request.search_request_features_input,
+                request.search_request_features,
                 False,
                 False,
                 "json",
-                models.SearchRequestFeaturesInput,
+                models.SearchRequestFeatures,
            ),
             timeout_ms=timeout_ms,
         )
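
With the _input/_output split removed, feature search queries are typed as models.SearchModelSearchQuery and the request body serializes as models.SearchRequestFeatures. A type-level sketch of the caller-side rename (the features.search method name is an assumption, and the query fields are omitted because this diff does not show them):

    from typing import List
    from mixpeek import models

    # 0.15.0: List[models.SearchModelSearchQueryInput]
    # 0.15.1: List[models.SearchModelSearchQuery] (the TypedDict form is also accepted)
    queries: List[models.SearchModelSearchQuery] = []

    # features.search(queries=queries, collections=[...], filters=..., page_size=10)   # method name assumed
    # now builds a models.SearchRequestFeatures body instead of SearchRequestFeaturesInput.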
mixpeek/ingest.py CHANGED
@@ -26,15 +26,12 @@ class Ingest(BaseSDK):
         feature_extractors: OptionalNullable[
             Union[models.TextSettings, models.TextSettingsTypedDict]
         ] = UNSET,
-        percolate: OptionalNullable[
-            Union[models.PercolateRequest, models.PercolateRequestTypedDict]
-        ] = UNSET,
         skip_duplicate: OptionalNullable[bool] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.DbModelTaskResponse:
+    ) -> models.TaskResponse:
         r"""Ingest Text
 
         :param collection: Unique identifier for the collection where the processed asset will be stored, can be the collection name or collection ID. If neither exist, the collection will be created.
@@ -42,7 +39,6 @@
         :param asset_update: Controls how processing results are stored - either creating a new asset or updating an existing one.
         :param metadata: Additional metadata associated with the file. Can include any key-value pairs relevant to the file.
         :param feature_extractors: Settings for text processing.
-        :param percolate: Settings for percolating the asset against stored queries.
         :param skip_duplicate: Skips processing when a duplicate hash is found and stores an error by the task_id with the existing asset_id
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -70,9 +66,6 @@
                 feature_extractors=utils.get_pydantic_model(
                     feature_extractors, OptionalNullable[models.TextSettings]
                 ),
-                percolate=utils.get_pydantic_model(
-                    percolate, OptionalNullable[models.PercolateRequest]
-                ),
                 skip_duplicate=skip_duplicate,
             ),
         )
@@ -123,7 +116,7 @@
 
         data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, models.DbModelTaskResponse)
+            return utils.unmarshal_json(http_res.text, models.TaskResponse)
         if utils.match_response(
             http_res, ["400", "401", "403", "404", "500"], "application/json"
         ):
@@ -164,15 +157,12 @@
         feature_extractors: OptionalNullable[
             Union[models.TextSettings, models.TextSettingsTypedDict]
         ] = UNSET,
-        percolate: OptionalNullable[
-            Union[models.PercolateRequest, models.PercolateRequestTypedDict]
-        ] = UNSET,
         skip_duplicate: OptionalNullable[bool] = UNSET,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.DbModelTaskResponse:
+    ) -> models.TaskResponse:
         r"""Ingest Text
 
         :param collection: Unique identifier for the collection where the processed asset will be stored, can be the collection name or collection ID. If neither exist, the collection will be created.
@@ -180,7 +170,6 @@
         :param asset_update: Controls how processing results are stored - either creating a new asset or updating an existing one.
         :param metadata: Additional metadata associated with the file. Can include any key-value pairs relevant to the file.
         :param feature_extractors: Settings for text processing.
-        :param percolate: Settings for percolating the asset against stored queries.
         :param skip_duplicate: Skips processing when a duplicate hash is found and stores an error by the task_id with the existing asset_id
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -208,9 +197,6 @@
                 feature_extractors=utils.get_pydantic_model(
                     feature_extractors, OptionalNullable[models.TextSettings]
                 ),
-                percolate=utils.get_pydantic_model(
-                    percolate, OptionalNullable[models.PercolateRequest]
-                ),
                 skip_duplicate=skip_duplicate,
             ),
         )
@@ -261,7 +247,7 @@
 
         data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, models.DbModelTaskResponse)
+            return utils.unmarshal_json(http_res.text, models.TaskResponse)
         if utils.match_response(
             http_res, ["400", "401", "403", "404", "500"], "application/json"
         ):
@@ -300,9 +286,6 @@
                 models.ProcessVideoURLInputMetadataTypedDict,
             ]
         ] = None,
-        percolate: OptionalNullable[
-            Union[models.PercolateRequest, models.PercolateRequestTypedDict]
-        ] = UNSET,
         skip_duplicate: OptionalNullable[bool] = UNSET,
         feature_extractors: OptionalNullable[
             Union[List[models.VideoSettings], List[models.VideoSettingsTypedDict]]
@@ -311,7 +294,7 @@
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.DbModelTaskResponse:
+    ) -> models.TaskResponse:
         r"""Ingest Video Url
 
         :param url: The URL of the asset to be processed. Must be a valid HTTP or HTTPS URL.
@@ -319,7 +302,6 @@
         :param x_namespace: Optional namespace for data isolation. This can be a namespace name or namespace ID. Example: 'netflix_prod' or 'ns_1234567890'. To create a namespace, use the /namespaces endpoint.
         :param asset_update: Controls how processing results are stored - either creating a new asset or updating an existing one.
         :param metadata: Additional metadata associated with the asset. Can include any key-value pairs relevant to the asset.
-        :param percolate: Settings for percolating the asset against stored queries.
         :param skip_duplicate: Makes feature extraction idempotent. When True and a duplicate file hash is found, copies features from the existing asset instead of reprocessing. This allows the same file to be used multiple times with different metadata while avoiding redundant processing.
         :param feature_extractors: Settings for video processing. Only applicable if the URL points to a video file.
         :param retries: Override the default retry configuration for this method
@@ -346,9 +328,6 @@
                 metadata=utils.get_pydantic_model(
                     metadata, Optional[models.ProcessVideoURLInputMetadata]
                 ),
-                percolate=utils.get_pydantic_model(
-                    percolate, OptionalNullable[models.PercolateRequest]
-                ),
                 skip_duplicate=skip_duplicate,
                 feature_extractors=utils.get_pydantic_model(
                     feature_extractors, OptionalNullable[List[models.VideoSettings]]
@@ -402,7 +381,7 @@
 
         data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, models.DbModelTaskResponse)
+            return utils.unmarshal_json(http_res.text, models.TaskResponse)
         if utils.match_response(
             http_res, ["400", "401", "403", "404", "500"], "application/json"
         ):
@@ -441,9 +420,6 @@
                 models.ProcessVideoURLInputMetadataTypedDict,
             ]
         ] = None,
-        percolate: OptionalNullable[
-            Union[models.PercolateRequest, models.PercolateRequestTypedDict]
-        ] = UNSET,
         skip_duplicate: OptionalNullable[bool] = UNSET,
         feature_extractors: OptionalNullable[
             Union[List[models.VideoSettings], List[models.VideoSettingsTypedDict]]
@@ -452,7 +428,7 @@
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.DbModelTaskResponse:
+    ) -> models.TaskResponse:
         r"""Ingest Video Url
 
         :param url: The URL of the asset to be processed. Must be a valid HTTP or HTTPS URL.
@@ -460,7 +436,6 @@
         :param x_namespace: Optional namespace for data isolation. This can be a namespace name or namespace ID. Example: 'netflix_prod' or 'ns_1234567890'. To create a namespace, use the /namespaces endpoint.
         :param asset_update: Controls how processing results are stored - either creating a new asset or updating an existing one.
         :param metadata: Additional metadata associated with the asset. Can include any key-value pairs relevant to the asset.
-        :param percolate: Settings for percolating the asset against stored queries.
         :param skip_duplicate: Makes feature extraction idempotent. When True and a duplicate file hash is found, copies features from the existing asset instead of reprocessing. This allows the same file to be used multiple times with different metadata while avoiding redundant processing.
         :param feature_extractors: Settings for video processing. Only applicable if the URL points to a video file.
         :param retries: Override the default retry configuration for this method
@@ -487,9 +462,6 @@
                 metadata=utils.get_pydantic_model(
                     metadata, Optional[models.ProcessVideoURLInputMetadata]
                 ),
-                percolate=utils.get_pydantic_model(
-                    percolate, OptionalNullable[models.PercolateRequest]
-                ),
                 skip_duplicate=skip_duplicate,
                 feature_extractors=utils.get_pydantic_model(
                     feature_extractors, OptionalNullable[List[models.VideoSettings]]
@@ -543,7 +515,7 @@
 
         data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, models.DbModelTaskResponse)
+            return utils.unmarshal_json(http_res.text, models.TaskResponse)
         if utils.match_response(
             http_res, ["400", "401", "403", "404", "500"], "application/json"
         ):
@@ -582,9 +554,6 @@
                 models.ProcessImageURLInputMetadataTypedDict,
             ]
         ] = None,
-        percolate: OptionalNullable[
-            Union[models.PercolateRequest, models.PercolateRequestTypedDict]
-        ] = UNSET,
         skip_duplicate: OptionalNullable[bool] = UNSET,
         feature_extractors: OptionalNullable[
             Union[models.ImageSettings, models.ImageSettingsTypedDict]
@@ -593,7 +562,7 @@
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.DbModelTaskResponse:
+    ) -> models.TaskResponse:
         r"""Ingest Image Url
 
         :param url: The URL of the asset to be processed. Must be a valid HTTP or HTTPS URL.
@@ -601,7 +570,6 @@
         :param x_namespace: Optional namespace for data isolation. This can be a namespace name or namespace ID. Example: 'netflix_prod' or 'ns_1234567890'. To create a namespace, use the /namespaces endpoint.
         :param asset_update: Controls how processing results are stored - either creating a new asset or updating an existing one.
         :param metadata: Additional metadata associated with the asset. Can include any key-value pairs relevant to the asset.
-        :param percolate: Settings for percolating the asset against stored queries.
         :param skip_duplicate: Makes feature extraction idempotent. When True and a duplicate file hash is found, copies features from the existing asset instead of reprocessing. This allows the same file to be used multiple times with different metadata while avoiding redundant processing.
         :param feature_extractors: Settings for image processing. Only applicable if the URL points to an image file.
         :param retries: Override the default retry configuration for this method
@@ -628,9 +596,6 @@
                 metadata=utils.get_pydantic_model(
                     metadata, Optional[models.ProcessImageURLInputMetadata]
                 ),
-                percolate=utils.get_pydantic_model(
-                    percolate, OptionalNullable[models.PercolateRequest]
-                ),
                 skip_duplicate=skip_duplicate,
                 feature_extractors=utils.get_pydantic_model(
                     feature_extractors, OptionalNullable[models.ImageSettings]
@@ -684,7 +649,7 @@
 
         data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, models.DbModelTaskResponse)
+            return utils.unmarshal_json(http_res.text, models.TaskResponse)
         if utils.match_response(
             http_res, ["400", "401", "403", "404", "500"], "application/json"
         ):
@@ -723,9 +688,6 @@
                 models.ProcessImageURLInputMetadataTypedDict,
             ]
         ] = None,
-        percolate: OptionalNullable[
-            Union[models.PercolateRequest, models.PercolateRequestTypedDict]
-        ] = UNSET,
         skip_duplicate: OptionalNullable[bool] = UNSET,
         feature_extractors: OptionalNullable[
             Union[models.ImageSettings, models.ImageSettingsTypedDict]
@@ -734,7 +696,7 @@
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
         http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.DbModelTaskResponse:
+    ) -> models.TaskResponse:
         r"""Ingest Image Url
 
         :param url: The URL of the asset to be processed. Must be a valid HTTP or HTTPS URL.
@@ -742,7 +704,6 @@
         :param x_namespace: Optional namespace for data isolation. This can be a namespace name or namespace ID. Example: 'netflix_prod' or 'ns_1234567890'. To create a namespace, use the /namespaces endpoint.
         :param asset_update: Controls how processing results are stored - either creating a new asset or updating an existing one.
         :param metadata: Additional metadata associated with the asset. Can include any key-value pairs relevant to the asset.
-        :param percolate: Settings for percolating the asset against stored queries.
         :param skip_duplicate: Makes feature extraction idempotent. When True and a duplicate file hash is found, copies features from the existing asset instead of reprocessing. This allows the same file to be used multiple times with different metadata while avoiding redundant processing.
         :param feature_extractors: Settings for image processing. Only applicable if the URL points to an image file.
         :param retries: Override the default retry configuration for this method
@@ -769,9 +730,6 @@
                 metadata=utils.get_pydantic_model(
                     metadata, Optional[models.ProcessImageURLInputMetadata]
                 ),
-                percolate=utils.get_pydantic_model(
-                    percolate, OptionalNullable[models.PercolateRequest]
-                ),
                 skip_duplicate=skip_duplicate,
                 feature_extractors=utils.get_pydantic_model(
                     feature_extractors, OptionalNullable[models.ImageSettings]
@@ -825,7 +783,7 @@
 
         data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, models.DbModelTaskResponse)
+            return utils.unmarshal_json(http_res.text, models.TaskResponse)
         if utils.match_response(
             http_res, ["400", "401", "403", "404", "500"], "application/json"
         ):
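
Two caller-facing changes run through every ingest method shown above: the percolate keyword is gone, and a 200 response now deserializes into models.TaskResponse instead of models.DbModelTaskResponse. A hedged sketch of an updated call (the ingest.image_url method name, the collection keyword, and the auth parameter are assumptions; url and skip_duplicate come from the signatures above, and other arguments are omitted):

    from mixpeek import Mixpeek

    mp = Mixpeek(token="MIXPEEK_API_KEY")     # auth parameter name assumed

    task = mp.ingest.image_url(               # method name assumed
        url="https://example.com/poster.jpg",
        collection="posters",                 # keyword assumed
        skip_duplicate=True,
        # percolate=... was accepted in 0.15.0; it is no longer a parameter in 0.15.1
    )
    # task is a models.TaskResponse (0.15.0 returned models.DbModelTaskResponse)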