mixpeek 0.15.0__py3-none-any.whl → 0.15.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mixpeek/_version.py +1 -1
- mixpeek/assets.py +8 -8
- mixpeek/collections.py +4 -4
- mixpeek/featureextractors.py +6 -6
- mixpeek/features.py +20 -20
- mixpeek/ingest.py +12 -54
- mixpeek/models/__init__.py +27 -108
- mixpeek/models/assetresponse.py +4 -4
- mixpeek/models/availableindexesresponse.py +2 -2
- mixpeek/models/availablemodels.py +4 -0
- mixpeek/models/createnamespacerequest.py +4 -4
- mixpeek/models/embeddingrequest.py +2 -2
- mixpeek/models/entitysettings.py +50 -0
- mixpeek/models/featureextractionembeddingrequest.py +2 -2
- mixpeek/models/imagedescribesettings.py +6 -6
- mixpeek/models/imagereadsettings.py +6 -6
- mixpeek/models/imagesettings.py +17 -4
- mixpeek/models/listassetsrequest.py +3 -3
- mixpeek/models/listfeaturesrequest.py +3 -3
- mixpeek/models/{logicaloperator_input.py → logicaloperator.py} +8 -8
- mixpeek/models/namespaceresponse.py +2 -2
- mixpeek/models/processimageurlinput.py +1 -13
- mixpeek/models/processtextinput.py +1 -13
- mixpeek/models/processvideourlinput.py +1 -13
- mixpeek/models/search_features_features_search_postop.py +4 -7
- mixpeek/models/{search_model_searchquery_input.py → search_model_searchquery.py} +7 -7
- mixpeek/models/searchassetsrequest.py +3 -3
- mixpeek/models/{searchrequestfeatures_output.py → searchrequestfeatures.py} +11 -11
- mixpeek/models/{percolaterequest.py → taskresponse.py} +15 -17
- mixpeek/models/taskstatus.py +1 -0
- mixpeek/models/taxonomyextractionconfig.py +31 -0
- mixpeek/models/textsettings.py +10 -4
- mixpeek/models/vectormodel.py +4 -0
- mixpeek/models/videodescribesettings.py +6 -6
- mixpeek/models/videoreadsettings.py +6 -6
- mixpeek/models/videosettings.py +17 -3
- mixpeek/models/videotranscriptionsettings.py +6 -6
- mixpeek/namespaces.py +10 -10
- mixpeek/sdk.py +0 -6
- mixpeek/sdkconfiguration.py +2 -2
- mixpeek/tasks.py +4 -4
- {mixpeek-0.15.0.dist-info → mixpeek-0.15.2.dist-info}/METADATA +1 -11
- {mixpeek-0.15.0.dist-info → mixpeek-0.15.2.dist-info}/RECORD +44 -56
- mixpeek/interactions.py +0 -228
- mixpeek/models/create_interaction_features_search_interactions_postop.py +0 -59
- mixpeek/models/db_model_taskresponse.py +0 -20
- mixpeek/models/delete_interaction_features_search_interactions_interaction_id_deleteop.py +0 -59
- mixpeek/models/get_interaction_features_search_interactions_interaction_id_getop.py +0 -59
- mixpeek/models/interactionresponse.py +0 -87
- mixpeek/models/interactiontype.py +0 -11
- mixpeek/models/list_interactions_features_search_interactions_getop.py +0 -96
- mixpeek/models/logicaloperator_output.py +0 -103
- mixpeek/models/searchinteraction.py +0 -82
- mixpeek/models/searchquery_output.py +0 -79
- mixpeek/models/searchrequestfeatures_input.py +0 -151
- mixpeek/models/tasks_model_taskresponse.py +0 -24
- mixpeek/searchinteractions.py +0 -666
- {mixpeek-0.15.0.dist-info → mixpeek-0.15.2.dist-info}/WHEEL +0 -0
mixpeek/models/{logicaloperator_input.py → logicaloperator.py} RENAMED
@@ -9,7 +9,7 @@ from typing import List, Union
 from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
 
 
-class LogicalOperatorInputTypedDict(TypedDict):
+class LogicalOperatorTypedDict(TypedDict):
     case_sensitive: NotRequired[Nullable[bool]]
     r"""Whether to perform case-sensitive matching"""
     and_: NotRequired[Nullable[List[AndTypedDict]]]
@@ -20,7 +20,7 @@ class LogicalOperatorInputTypedDict(TypedDict):
     r"""Logical NOR operation"""
 
 
-class LogicalOperatorInput(BaseModel):
+class LogicalOperator(BaseModel):
     case_sensitive: OptionalNullable[bool] = UNSET
     r"""Whether to perform case-sensitive matching"""
 
@@ -65,24 +65,24 @@ class LogicalOperatorInput(BaseModel):
 
 
 AndTypedDict = TypeAliasType(
-    "AndTypedDict", Union[FilterConditionTypedDict, LogicalOperatorInputTypedDict]
+    "AndTypedDict", Union[FilterConditionTypedDict, LogicalOperatorTypedDict]
 )
 
 
-And = TypeAliasType("And", Union[FilterCondition, LogicalOperatorInput])
+And = TypeAliasType("And", Union[FilterCondition, LogicalOperator])
 
 
 OrTypedDict = TypeAliasType(
-    "OrTypedDict", Union[FilterConditionTypedDict, LogicalOperatorInputTypedDict]
+    "OrTypedDict", Union[FilterConditionTypedDict, LogicalOperatorTypedDict]
 )
 
 
-Or = TypeAliasType("Or", Union[FilterCondition, LogicalOperatorInput])
+Or = TypeAliasType("Or", Union[FilterCondition, LogicalOperator])
 
 
 NorTypedDict = TypeAliasType(
-    "NorTypedDict", Union[FilterConditionTypedDict, LogicalOperatorInputTypedDict]
+    "NorTypedDict", Union[FilterConditionTypedDict, LogicalOperatorTypedDict]
 )
 
 
-Nor = TypeAliasType("Nor", Union[FilterCondition, LogicalOperatorInput])
+Nor = TypeAliasType("Nor", Union[FilterCondition, LogicalOperator])
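In 0.15.2 the filter operator is a single model; the separate Input/Output variants from 0.15.0 are gone (logicaloperator_output.py also appears in the removed-files list above). A minimal migration sketch, assuming mixpeek.models still re-exports the class the way the 0.15.0 models/__init__.py did:

# Hedged sketch: the import path is an assumption based on the 0.15.0 layout.
from mixpeek.models import LogicalOperator  # was LogicalOperatorInput / LogicalOperatorOutput

# case_sensitive and and_ are the fields visible in this diff; nested entries
# may be FilterCondition objects or further LogicalOperator instances.
op = LogicalOperator(case_sensitive=False)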
mixpeek/models/namespaceresponse.py CHANGED
@@ -10,7 +10,7 @@ from typing_extensions import TypedDict
 class NamespaceResponseTypedDict(TypedDict):
     namespace_id: str
     namespace_name: str
-    …
+    embedding_models: List[str]
     payload_indexes: Nullable[List[Any]]
 
 
@@ -19,7 +19,7 @@ class NamespaceResponse(BaseModel):
 
     namespace_name: str
 
-    …
+    embedding_models: List[str]
 
     payload_indexes: Nullable[List[Any]]
 
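NamespaceResponse now exposes the namespace's embedding models as a plain list of strings. A small sketch of reading the new field; only the field name comes from this diff, the surrounding code is illustrative:

def embedding_models_for(namespace) -> list[str]:
    # namespace is a mixpeek.models.NamespaceResponse from 0.15.2;
    # embedding_models replaces the field removed in the hunk above.
    return list(namespace.embedding_models)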
mixpeek/models/processimageurlinput.py CHANGED
@@ -3,7 +3,6 @@
 from __future__ import annotations
 from .assetupdate import AssetUpdate, AssetUpdateTypedDict
 from .imagesettings import ImageSettings, ImageSettingsTypedDict
-from .percolaterequest import PercolateRequest, PercolateRequestTypedDict
 from mixpeek.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from pydantic import model_serializer
 from typing import Optional
@@ -27,8 +26,6 @@ class ProcessImageURLInputTypedDict(TypedDict):
     r"""Controls how processing results are stored - either creating a new asset or updating an existing one."""
     metadata: NotRequired[ProcessImageURLInputMetadataTypedDict]
     r"""Additional metadata associated with the asset. Can include any key-value pairs relevant to the asset."""
-    percolate: NotRequired[Nullable[PercolateRequestTypedDict]]
-    r"""Settings for percolating the asset against stored queries."""
     skip_duplicate: NotRequired[Nullable[bool]]
     r"""Makes feature extraction idempotent. When True and a duplicate file hash is found, copies features from the existing asset instead of reprocessing. This allows the same file to be used multiple times with different metadata while avoiding redundant processing."""
     feature_extractors: NotRequired[Nullable[ImageSettingsTypedDict]]
@@ -48,9 +45,6 @@ class ProcessImageURLInput(BaseModel):
     metadata: Optional[ProcessImageURLInputMetadata] = None
     r"""Additional metadata associated with the asset. Can include any key-value pairs relevant to the asset."""
 
-    percolate: OptionalNullable[PercolateRequest] = UNSET
-    r"""Settings for percolating the asset against stored queries."""
-
     skip_duplicate: OptionalNullable[bool] = UNSET
     r"""Makes feature extraction idempotent. When True and a duplicate file hash is found, copies features from the existing asset instead of reprocessing. This allows the same file to be used multiple times with different metadata while avoiding redundant processing."""
 
@@ -62,16 +56,10 @@ class ProcessImageURLInput(BaseModel):
         optional_fields = [
             "asset_update",
             "metadata",
-            "percolate",
-            "skip_duplicate",
-            "feature_extractors",
-        ]
-        nullable_fields = [
-            "asset_update",
-            "percolate",
             "skip_duplicate",
             "feature_extractors",
         ]
+        nullable_fields = ["asset_update", "skip_duplicate", "feature_extractors"]
        null_default_fields = []
 
         serialized = handler(self)
mixpeek/models/processtextinput.py CHANGED
@@ -2,7 +2,6 @@
 
 from __future__ import annotations
 from .assetupdate import AssetUpdate, AssetUpdateTypedDict
-from .percolaterequest import PercolateRequest, PercolateRequestTypedDict
 from .textsettings import TextSettings, TextSettingsTypedDict
 from mixpeek.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from pydantic import model_serializer
@@ -27,8 +26,6 @@ class ProcessTextInputTypedDict(TypedDict):
     r"""Additional metadata associated with the file. Can include any key-value pairs relevant to the file."""
     feature_extractors: NotRequired[Nullable[TextSettingsTypedDict]]
     r"""Settings for text processing."""
-    percolate: NotRequired[Nullable[PercolateRequestTypedDict]]
-    r"""Settings for percolating the asset against stored queries."""
     skip_duplicate: NotRequired[Nullable[bool]]
     r"""Skips processing when a duplicate hash is found and stores an error by the task_id with the existing asset_id"""
 
@@ -46,9 +43,6 @@ class ProcessTextInput(BaseModel):
     feature_extractors: OptionalNullable[TextSettings] = UNSET
     r"""Settings for text processing."""
 
-    percolate: OptionalNullable[PercolateRequest] = UNSET
-    r"""Settings for percolating the asset against stored queries."""
-
     skip_duplicate: OptionalNullable[bool] = UNSET
     r"""Skips processing when a duplicate hash is found and stores an error by the task_id with the existing asset_id"""
 
@@ -58,15 +52,9 @@ class ProcessTextInput(BaseModel):
             "asset_update",
             "metadata",
             "feature_extractors",
-            "percolate",
-            "skip_duplicate",
-        ]
-        nullable_fields = [
-            "asset_update",
-            "feature_extractors",
-            "percolate",
             "skip_duplicate",
         ]
+        nullable_fields = ["asset_update", "feature_extractors", "skip_duplicate"]
         null_default_fields = []
 
         serialized = handler(self)
mixpeek/models/processvideourlinput.py CHANGED
@@ -2,7 +2,6 @@
 
 from __future__ import annotations
 from .assetupdate import AssetUpdate, AssetUpdateTypedDict
-from .percolaterequest import PercolateRequest, PercolateRequestTypedDict
 from .videosettings import VideoSettings, VideoSettingsTypedDict
 from mixpeek.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from pydantic import model_serializer
@@ -27,8 +26,6 @@ class ProcessVideoURLInputTypedDict(TypedDict):
     r"""Controls how processing results are stored - either creating a new asset or updating an existing one."""
     metadata: NotRequired[ProcessVideoURLInputMetadataTypedDict]
     r"""Additional metadata associated with the asset. Can include any key-value pairs relevant to the asset."""
-    percolate: NotRequired[Nullable[PercolateRequestTypedDict]]
-    r"""Settings for percolating the asset against stored queries."""
     skip_duplicate: NotRequired[Nullable[bool]]
     r"""Makes feature extraction idempotent. When True and a duplicate file hash is found, copies features from the existing asset instead of reprocessing. This allows the same file to be used multiple times with different metadata while avoiding redundant processing."""
     feature_extractors: NotRequired[Nullable[List[VideoSettingsTypedDict]]]
@@ -48,9 +45,6 @@ class ProcessVideoURLInput(BaseModel):
     metadata: Optional[ProcessVideoURLInputMetadata] = None
     r"""Additional metadata associated with the asset. Can include any key-value pairs relevant to the asset."""
 
-    percolate: OptionalNullable[PercolateRequest] = UNSET
-    r"""Settings for percolating the asset against stored queries."""
-
     skip_duplicate: OptionalNullable[bool] = UNSET
     r"""Makes feature extraction idempotent. When True and a duplicate file hash is found, copies features from the existing asset instead of reprocessing. This allows the same file to be used multiple times with different metadata while avoiding redundant processing."""
 
@@ -62,16 +56,10 @@ class ProcessVideoURLInput(BaseModel):
         optional_fields = [
             "asset_update",
             "metadata",
-            "percolate",
-            "skip_duplicate",
-            "feature_extractors",
-        ]
-        nullable_fields = [
-            "asset_update",
-            "percolate",
             "skip_duplicate",
             "feature_extractors",
         ]
+        nullable_fields = ["asset_update", "skip_duplicate", "feature_extractors"]
         null_default_fields = []
 
         serialized = handler(self)
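Across ProcessImageURLInput, ProcessTextInput, and ProcessVideoURLInput the `percolate` field and its PercolateRequest import are gone; the remaining fields are unchanged. A hedged sketch of the 0.15.2 payload shape, written as a plain dict because the models' required URL/text fields are not visible in this diff:

# Only keys shown in these hunks are listed; required fields such as the
# source URL or text are intentionally omitted.
ingest_kwargs = {
    "metadata": {"source": "backfill"},   # arbitrary key-value pairs
    "skip_duplicate": True,               # still supported, still nullable
    "feature_extractors": None,           # ImageSettings / TextSettings / [VideoSettings]
    # "percolate": {...}                  # removed in 0.15.2 -- drop this key
}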
mixpeek/models/search_features_features_search_postop.py CHANGED
@@ -1,10 +1,7 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from .searchrequestfeatures_input import (
-    SearchRequestFeaturesInput,
-    SearchRequestFeaturesInputTypedDict,
-)
+from .searchrequestfeatures import SearchRequestFeatures, SearchRequestFeaturesTypedDict
 from mixpeek.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from mixpeek.utils import (
     FieldMetadata,
@@ -19,7 +16,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict
 
 
 class SearchFeaturesFeaturesSearchPostRequestTypedDict(TypedDict):
-    …
+    search_request_features: SearchRequestFeaturesTypedDict
     offset_position: NotRequired[Nullable[int]]
     r"""The position to start returning results from. Used for pagination. Does not work with group_by"""
     page_size: NotRequired[int]
@@ -29,8 +26,8 @@ class SearchFeaturesFeaturesSearchPostRequestTypedDict(TypedDict):
 
 
 class SearchFeaturesFeaturesSearchPostRequest(BaseModel):
-    …
-    …
+    search_request_features: Annotated[
+        SearchRequestFeatures,
         FieldMetadata(request=RequestMetadata(media_type="application/json")),
     ]
 
mixpeek/models/{search_model_searchquery_input.py → search_model_searchquery.py} RENAMED
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 from .availablemodels import AvailableModels
-from .logicaloperator_input import LogicalOperatorInput, LogicalOperatorInputTypedDict
+from .logicaloperator import LogicalOperator, LogicalOperatorTypedDict
 from .querysettings import QuerySettings, QuerySettingsTypedDict
 from enum import Enum
 from mixpeek.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
@@ -18,20 +18,20 @@ class Type(str, Enum):
     BASE64 = "base64"
 
 
-class SearchModelSearchQueryInputTypedDict(TypedDict):
-    …
+class SearchModelSearchQueryTypedDict(TypedDict):
+    embedding_model: AvailableModels
     value: str
     r"""Query value - can be text, URL, or base64 encoded image"""
     type: Type
     r"""Type of input (text, url, or base64)"""
-    filters: NotRequired[Nullable[LogicalOperatorInputTypedDict]]
+    filters: NotRequired[Nullable[LogicalOperatorTypedDict]]
     r"""Optional filters for the query, this is used for filtering individual vector indexes"""
     settings: NotRequired[Nullable[QuerySettingsTypedDict]]
    r"""Optional settings for this specific query"""
 
 
-class SearchModelSearchQueryInput(BaseModel):
-    …
+class SearchModelSearchQuery(BaseModel):
+    embedding_model: AvailableModels
 
     value: str
     r"""Query value - can be text, URL, or base64 encoded image"""
@@ -39,7 +39,7 @@ class SearchModelSearchQueryInput(BaseModel):
     type: Type
     r"""Type of input (text, url, or base64)"""
 
-    filters: OptionalNullable[LogicalOperatorInput] = UNSET
+    filters: OptionalNullable[LogicalOperator] = UNSET
     r"""Optional filters for the query, this is used for filtering individual vector indexes"""
 
     settings: OptionalNullable[QuerySettings] = UNSET
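With the `_input` suffix dropped, a single model now describes one search query. A sketch of that shape as a plain dict mirroring SearchModelSearchQueryTypedDict; the field names come from this diff, the values are illustrative:

query = {
    "embedding_model": "multimodal",   # an AvailableModels value
    "value": "a red bicycle",          # text, URL, or base64 payload
    "type": "text",                    # "text" | "url" | "base64"
    "filters": None,                   # optional LogicalOperator
    "settings": None,                  # optional QuerySettings
}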
mixpeek/models/searchassetsrequest.py CHANGED
@@ -5,7 +5,7 @@ from .assets_model_searchquery import (
     AssetsModelSearchQuery,
     AssetsModelSearchQueryTypedDict,
 )
-from .logicaloperator_input import LogicalOperatorInput, LogicalOperatorInputTypedDict
+from .logicaloperator import LogicalOperator, LogicalOperatorTypedDict
 from .sortoption import SortOption, SortOptionTypedDict
 from mixpeek.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from pydantic import model_serializer
@@ -18,7 +18,7 @@ class SearchAssetsRequestTypedDict(TypedDict):
     r"""List of Collection IDs or Names to search within, required"""
     query: NotRequired[Nullable[AssetsModelSearchQueryTypedDict]]
     r"""Structured query object specifying which fields to search in and what to search for"""
-    filters: NotRequired[Nullable[LogicalOperatorInputTypedDict]]
+    filters: NotRequired[Nullable[LogicalOperatorTypedDict]]
     r"""Complex nested query filters"""
     sort: NotRequired[Nullable[SortOptionTypedDict]]
     r"""List of fields to sort by"""
@@ -35,7 +35,7 @@ class SearchAssetsRequest(BaseModel):
     query: OptionalNullable[AssetsModelSearchQuery] = UNSET
     r"""Structured query object specifying which fields to search in and what to search for"""
 
-    filters: OptionalNullable[LogicalOperatorInput] = UNSET
+    filters: OptionalNullable[LogicalOperator] = UNSET
     r"""Complex nested query filters"""
 
     sort: OptionalNullable[SortOption] = UNSET
mixpeek/models/{searchrequestfeatures_output.py → searchrequestfeatures.py} RENAMED
@@ -2,12 +2,12 @@
 
 from __future__ import annotations
 from .groupbyoptions import GroupByOptions, GroupByOptionsTypedDict
-from .logicaloperator_output import (
-    LogicalOperatorOutput,
-    LogicalOperatorOutputTypedDict,
-)
+from .logicaloperator import LogicalOperator, LogicalOperatorTypedDict
 from .rerankingoptions import RerankingOptions, RerankingOptionsTypedDict
-from .…
+from .search_model_searchquery import (
+    SearchModelSearchQuery,
+    SearchModelSearchQueryTypedDict,
+)
 from .sortoption import SortOption, SortOptionTypedDict
 from mixpeek.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from pydantic import model_serializer
@@ -15,8 +15,8 @@ from typing import List
 from typing_extensions import NotRequired, TypedDict
 
 
-class SearchRequestFeaturesOutputTypedDict(TypedDict):
-    queries: List[…]
+class SearchRequestFeaturesTypedDict(TypedDict):
+    queries: List[SearchModelSearchQueryTypedDict]
     r"""List of search queries to perform.
 
     Behavior:
@@ -40,7 +40,7 @@ class SearchRequestFeaturesOutputTypedDict(TypedDict):
     """
     collections: List[str]
     r"""List of Collection names to search within, required"""
-    filters: NotRequired[Nullable[LogicalOperatorOutputTypedDict]]
+    filters: NotRequired[Nullable[LogicalOperatorTypedDict]]
     r"""Used for filtering across all indexes"""
     group_by: NotRequired[Nullable[GroupByOptionsTypedDict]]
     r"""Grouping options for search results"""
@@ -56,8 +56,8 @@ class SearchRequestFeaturesOutputTypedDict(TypedDict):
     r"""Return the presigned URL for the asset and preview asset, this will introduce additional latency"""
 
 
-class SearchRequestFeaturesOutput(BaseModel):
-    queries: List[…]
+class SearchRequestFeatures(BaseModel):
+    queries: List[SearchModelSearchQuery]
     r"""List of search queries to perform.
 
     Behavior:
@@ -83,7 +83,7 @@ class SearchRequestFeaturesOutput(BaseModel):
     collections: List[str]
     r"""List of Collection names to search within, required"""
 
-    filters: OptionalNullable[LogicalOperatorOutput] = UNSET
+    filters: OptionalNullable[LogicalOperator] = UNSET
     r"""Used for filtering across all indexes"""
 
     group_by: OptionalNullable[GroupByOptions] = UNSET
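The feature-search request body likewise collapses to one model, SearchRequestFeatures, whose queries are SearchModelSearchQuery items. A hedged sketch of a request body using only the fields visible in this diff:

search_request = {
    "queries": [
        {"embedding_model": "multimodal", "value": "a red bicycle", "type": "text"},
    ],
    "collections": ["products"],   # collection names, required
    "filters": None,               # optional LogicalOperator applied across all indexes
    "group_by": None,              # optional GroupByOptions
}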
mixpeek/models/{percolaterequest.py → taskresponse.py} RENAMED
@@ -1,35 +1,33 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from .taskstatus import TaskStatus
 from mixpeek.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from pydantic import model_serializer
-from typing import Optional
+from typing import Any, List, Optional
 from typing_extensions import NotRequired, TypedDict
 
 
-class PercolateRequestTypedDict(TypedDict):
-    …
-    …
-    …
-    …
-    max_candidates: NotRequired[Nullable[int]]
-    r"""Maximum number of matching percolators to return in the response"""
+class TaskResponseTypedDict(TypedDict):
+    task_id: str
+    status: NotRequired[TaskStatus]
+    inputs: NotRequired[Nullable[List[Any]]]
+    outputs: NotRequired[Nullable[List[Any]]]
 
 
-class PercolateRequest(BaseModel):
-    …
-    r"""Whether to enable percolator matching for this request"""
+class TaskResponse(BaseModel):
+    task_id: str
 
-    …
-    r"""Minimum similarity score (0-1) required for a match. Higher values mean stricter matching."""
+    status: Optional[TaskStatus] = None
 
-    …
-    …
+    inputs: OptionalNullable[List[Any]] = UNSET
+
+    outputs: OptionalNullable[List[Any]] = UNSET
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["…
-        nullable_fields = ["…
+        optional_fields = ["status", "inputs", "outputs"]
+        nullable_fields = ["inputs", "outputs"]
         null_default_fields = []
 
         serialized = handler(self)
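With PercolateRequest gone, this module now holds a single TaskResponse, standing in for the separate db_model_taskresponse / tasks_model_taskresponse variants removed in this release (see the file list above). A small sketch of consuming it; only the field names come from this diff:

def summarize_task(task) -> str:
    # task is a mixpeek.models.TaskResponse; status is a TaskStatus value,
    # inputs/outputs are nullable lists.
    outputs = task.outputs or []
    return f"{task.task_id}: {task.status} ({len(outputs)} outputs)"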
mixpeek/models/taxonomyextractionconfig.py NEW
@@ -0,0 +1,31 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .availablemodels import AvailableModels
+from mixpeek.types import BaseModel
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class TaxonomyExtractionConfigTypedDict(TypedDict):
+    r"""Configuration for taxonomy-based entity extraction during ingestion"""
+
+    taxonomy_ids: List[str]
+    r"""List of taxonomy IDs to use for classification"""
+    embedding_models: List[AvailableModels]
+    r"""Vector indexes to use for classification"""
+    confidence_threshold: NotRequired[float]
+    r"""Minimum confidence score required for classification"""
+
+
+class TaxonomyExtractionConfig(BaseModel):
+    r"""Configuration for taxonomy-based entity extraction during ingestion"""
+
+    taxonomy_ids: List[str]
+    r"""List of taxonomy IDs to use for classification"""
+
+    embedding_models: List[AvailableModels]
+    r"""Vector indexes to use for classification"""
+
+    confidence_threshold: Optional[float] = 0.8
+    r"""Minimum confidence score required for classification"""
mixpeek/models/textsettings.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .embeddingrequest import EmbeddingRequest, EmbeddingRequestTypedDict
+from .entitysettings import EntitySettings, EntitySettingsTypedDict
 from .jsontextoutputsettings import (
     JSONTextOutputSettings,
     JSONTextOutputSettingsTypedDict,
@@ -15,25 +16,30 @@ from typing_extensions import NotRequired, TypedDict
 class TextSettingsTypedDict(TypedDict):
     embed: NotRequired[List[EmbeddingRequestTypedDict]]
     r"""List of embedding settings for generating multiple embeddings. field_name's provided are how the raw text will be inserted, if not provided, the field_name will be auto-generated.
-    Default: [{type: 'text', …
+    Default: [{type: 'text', embedding_model: 'multimodal'}] if none provided.
     """
     json_output: NotRequired[Nullable[JSONTextOutputSettingsTypedDict]]
     r"""Settings for structured JSON output of text analysis."""
+    entities: NotRequired[Nullable[EntitySettingsTypedDict]]
+    r"""Settings for extracting entities from text content"""
 
 
 class TextSettings(BaseModel):
     embed: Optional[List[EmbeddingRequest]] = None
     r"""List of embedding settings for generating multiple embeddings. field_name's provided are how the raw text will be inserted, if not provided, the field_name will be auto-generated.
-    Default: [{type: 'text', …
+    Default: [{type: 'text', embedding_model: 'multimodal'}] if none provided.
     """
 
     json_output: OptionalNullable[JSONTextOutputSettings] = UNSET
     r"""Settings for structured JSON output of text analysis."""
 
+    entities: OptionalNullable[EntitySettings] = UNSET
+    r"""Settings for extracting entities from text content"""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["embed", "json_output"]
-        nullable_fields = ["json_output"]
+        optional_fields = ["embed", "json_output", "entities"]
+        nullable_fields = ["json_output", "entities"]
         null_default_fields = []
 
         serialized = handler(self)
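TextSettings gains an optional, nullable `entities` block backed by the new EntitySettings model (entitysettings.py, +50 lines, not shown in this view). A sketch of the updated settings shape; because EntitySettings' own fields are not visible here, the block is left null:

text_settings = {
    "embed": None,          # defaults to [{type: 'text', embedding_model: 'multimodal'}]
    "json_output": None,
    "entities": None,       # new in 0.15.2; nullable; EntitySettings fields not shown in this diff
}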
mixpeek/models/vectormodel.py CHANGED
@@ -6,6 +6,10 @@ from enum import Enum
 
 class VectorModel(str, Enum):
     IMAGE = "image"
+    OPENAI_CLIP_VIT_BASE_PATCH32 = "openai-clip-vit-base-patch32"
     MULTIMODAL = "multimodal"
+    VERTEX_MULTIMODAL = "vertex-multimodal"
     TEXT = "text"
+    BAAI_BGE_M3 = "baai-bge-m3"
     KEYWORD = "keyword"
+    NAVER_SPLADE_V3 = "naver-splade-v3"
mixpeek/models/videodescribesettings.py CHANGED
@@ -25,8 +25,8 @@ class VideoDescribeSettingsTypedDict(TypedDict):
     r"""Maximum length of the description"""
     json_output: NotRequired[VideoDescribeSettingsJSONOutputTypedDict]
     r"""JSON format for the response"""
-    …
-    r"""Name of the vector model to use for embedding the text output. If …
+    embedding_model: NotRequired[Nullable[VectorModel]]
+    r"""Name of the vector model to use for embedding the text output. If embedding_model is duplicated, the vector will be overwritten."""
 
 
 class VideoDescribeSettings(BaseModel):
@@ -42,8 +42,8 @@ class VideoDescribeSettings(BaseModel):
     json_output: Optional[VideoDescribeSettingsJSONOutput] = None
     r"""JSON format for the response"""
 
-    …
-    r"""Name of the vector model to use for embedding the text output. If …
+    embedding_model: OptionalNullable[VectorModel] = UNSET
+    r"""Name of the vector model to use for embedding the text output. If embedding_model is duplicated, the vector will be overwritten."""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
@@ -52,9 +52,9 @@ class VideoDescribeSettings(BaseModel):
             "enabled",
             "max_length",
             "json_output",
-            "…
+            "embedding_model",
         ]
-        nullable_fields = ["prompt", "max_length", "…
+        nullable_fields = ["prompt", "max_length", "embedding_model"]
         null_default_fields = []
 
         serialized = handler(self)
mixpeek/models/videoreadsettings.py CHANGED
@@ -23,8 +23,8 @@ class VideoReadSettingsTypedDict(TypedDict):
     r"""Prompt for reading on-screen text"""
     json_output: NotRequired[JSONOutputTypedDict]
     r"""JSON format for the response"""
-    …
-    r"""Name of the vector model to use for embedding the text output. If …
+    embedding_model: NotRequired[Nullable[VectorModel]]
+    r"""Name of the vector model to use for embedding the text output. If embedding_model is duplicated, the vector will be overwritten."""
 
 
 class VideoReadSettings(BaseModel):
@@ -37,13 +37,13 @@ class VideoReadSettings(BaseModel):
     json_output: Optional[JSONOutput] = None
     r"""JSON format for the response"""
 
-    …
-    r"""Name of the vector model to use for embedding the text output. If …
+    embedding_model: OptionalNullable[VectorModel] = UNSET
+    r"""Name of the vector model to use for embedding the text output. If embedding_model is duplicated, the vector will be overwritten."""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["enabled", "prompt", "json_output", "…
-        nullable_fields = ["prompt", "…
+        optional_fields = ["enabled", "prompt", "json_output", "embedding_model"]
+        nullable_fields = ["prompt", "embedding_model"]
         null_default_fields = []
 
         serialized = handler(self)
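In both VideoDescribeSettings and VideoReadSettings the text-output embedding target is now the nullable `embedding_model` field, which takes a VectorModel value. A sketch of the describe-settings shape using only the fields visible in these hunks; the values are illustrative:

describe_settings = {
    "enabled": True,
    "max_length": 200,                 # illustrative value
    "json_output": None,
    "embedding_model": "multimodal",   # a VectorModel value; nullable
}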
mixpeek/models/videosettings.py CHANGED
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 from .embeddingrequest import EmbeddingRequest, EmbeddingRequestTypedDict
+from .entitysettings import EntitySettings, EntitySettingsTypedDict
 from .jsonvideooutputsettings import (
     JSONVideoOutputSettings,
     JSONVideoOutputSettingsTypedDict,
@@ -26,7 +27,7 @@ class VideoSettingsTypedDict(TypedDict):
     r"""Settings for reading and analyzing video content."""
     embed: NotRequired[List[EmbeddingRequestTypedDict]]
     r"""List of embedding settings for generating multiple embeddings. For now, if url is provided, value must be None.
-    Default: [{type: 'url', …
+    Default: [{type: 'url', embedding_model: 'multimodal'}] if none provided.
     """
     transcribe: NotRequired[Nullable[VideoTranscriptionSettingsTypedDict]]
     r"""Settings for transcribing video audio."""
@@ -36,6 +37,8 @@ class VideoSettingsTypedDict(TypedDict):
     r"""Settings for object detection in video frames."""
     json_output: NotRequired[Nullable[JSONVideoOutputSettingsTypedDict]]
     r"""Settings for structured JSON output of video analysis."""
+    entities: NotRequired[Nullable[EntitySettingsTypedDict]]
+    r"""Settings for extracting entities from video content"""
 
 
 class VideoSettings(BaseModel):
@@ -47,7 +50,7 @@ class VideoSettings(BaseModel):
 
     embed: Optional[List[EmbeddingRequest]] = None
     r"""List of embedding settings for generating multiple embeddings. For now, if url is provided, value must be None.
-    Default: [{type: 'url', …
+    Default: [{type: 'url', embedding_model: 'multimodal'}] if none provided.
     """
 
     transcribe: OptionalNullable[VideoTranscriptionSettings] = UNSET
@@ -62,6 +65,9 @@ class VideoSettings(BaseModel):
     json_output: OptionalNullable[JSONVideoOutputSettings] = UNSET
     r"""Settings for structured JSON output of video analysis."""
 
+    entities: OptionalNullable[EntitySettings] = UNSET
+    r"""Settings for extracting entities from video content"""
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = [
@@ -72,8 +78,16 @@ class VideoSettings(BaseModel):
             "describe",
             "detect",
             "json_output",
+            "entities",
+        ]
+        nullable_fields = [
+            "read",
+            "transcribe",
+            "describe",
+            "detect",
+            "json_output",
+            "entities",
         ]
-        nullable_fields = ["read", "transcribe", "describe", "detect", "json_output"]
         null_default_fields = []
 
         serialized = handler(self)