mixpeek 0.20.20__py3-none-any.whl → 0.21.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (228)
  1. mixpeek/_version.py +3 -3
  2. mixpeek/{assets.py → bucket_objects.py} +309 -793
  3. mixpeek/buckets.py +1292 -0
  4. mixpeek/clusters.py +306 -0
  5. mixpeek/collection_cache.py +820 -0
  6. mixpeek/{taxonomyentities.py → collection_documents.py} +305 -559
  7. mixpeek/collections.py +122 -996
  8. mixpeek/features.py +45 -991
  9. mixpeek/models/__init__.py +625 -570
  10. mixpeek/models/aggregationtype.py +13 -0
  11. mixpeek/models/apikeyupdate.py +1 -1
  12. mixpeek/models/attributebasedconfig.py +21 -0
  13. mixpeek/models/automaticnaming.py +29 -0
  14. mixpeek/models/basicfeatureextractor.py +35 -0
  15. mixpeek/models/{classify_features_v1_entities_taxonomies_taxonomy_classify_postop.py → batch_delete_documents_v1_collections_collection_identifier_documents_batch_deleteop.py} +17 -15
  16. mixpeek/models/{update_collection_v1_collections_collection_putop.py → batch_update_documents_v1_collections_collection_identifier_documents_batch_putop.py} +29 -19
  17. mixpeek/models/blobdetails.py +72 -0
  18. mixpeek/models/blobmodel.py +107 -0
  19. mixpeek/models/bucketcreaterequest.py +75 -0
  20. mixpeek/models/bucketresponse.py +90 -0
  21. mixpeek/models/bucketschema_input.py +37 -0
  22. mixpeek/models/bucketschema_output.py +37 -0
  23. mixpeek/models/{featureresponse.py → bucketschemafield_input.py} +45 -18
  24. mixpeek/models/bucketschemafield_output.py +101 -0
  25. mixpeek/models/bucketschemafieldtype.py +29 -0
  26. mixpeek/models/bucketupdaterequest.py +68 -0
  27. mixpeek/models/cachestats.py +40 -0
  28. mixpeek/models/{list_collections_v1_collections_getop.py → cleanup_cache_v1_collections_cache_cleanup_postop.py} +23 -12
  29. mixpeek/models/{inputtype.py → clusteringmethod.py} +3 -5
  30. mixpeek/models/clustertype.py +9 -0
  31. mixpeek/models/collectioncacheconfig_input.py +92 -0
  32. mixpeek/models/collectioncacheconfig_output.py +92 -0
  33. mixpeek/models/collectionconfig.py +32 -0
  34. mixpeek/models/collectionmodel.py +79 -8
  35. mixpeek/models/{create_taxonomy_v1_entities_taxonomies_postop.py → create_bucket_v1_buckets_create_postop.py} +7 -7
  36. mixpeek/models/{search_assets_v1_assets_search_postop.py → create_cluster_v1_clusters_postop.py} +7 -7
  37. mixpeek/models/{create_collection_v1_collections_postop.py → create_collection_v1_collections_create_postop.py} +3 -3
  38. mixpeek/models/{ingest_video_url_v1_ingest_videos_url_postop.py → create_interaction_v1_retrievers_interactions_postop.py} +7 -7
  39. mixpeek/models/{partial_asset_update_v1_assets_asset_id_patchop.py → create_object_v1_buckets_bucket_identifier_objects_create_postop.py} +11 -9
  40. mixpeek/models/{ingest_text_v1_ingest_text_postop.py → create_retriever_v1_retrievers_retrievers_postop.py} +10 -7
  41. mixpeek/models/{ingest_image_url_v1_ingest_images_url_postop.py → create_taxonomy_v1_taxonomies_postop.py} +7 -7
  42. mixpeek/models/createblobrequest.py +80 -0
  43. mixpeek/models/createclusterrequest.py +74 -0
  44. mixpeek/models/createcollectionrequest.py +72 -3
  45. mixpeek/models/createnamespacerequest.py +14 -8
  46. mixpeek/models/createobjectrequest.py +75 -0
  47. mixpeek/models/createretrieverrequest.py +90 -0
  48. mixpeek/models/{taxonomyupdate.py → createtaxonomyrequest.py} +15 -13
  49. mixpeek/models/{delete_asset_v1_assets_asset_id_deleteop.py → delete_bucket_v1_buckets_bucket_identifier_deleteop.py} +5 -5
  50. mixpeek/models/{delete_classifications_v1_entities_taxonomies_taxonomy_classifications_classification_id_deleteop.py → delete_document_v1_collections_collection_identifier_documents_document_id_deleteop.py} +11 -11
  51. mixpeek/models/{get_taxonomy_v1_entities_taxonomies_taxonomy_getop.py → delete_interaction_v1_retrievers_interactions_interaction_id_deleteop.py} +7 -7
  52. mixpeek/models/delete_object_v1_buckets_bucket_identifier_objects_object_identifier_deleteop.py +70 -0
  53. mixpeek/models/{featureextractionembeddingrequest.py → documenthandlingconfig.py} +12 -15
  54. mixpeek/models/documentinputhandling.py +11 -0
  55. mixpeek/models/{jsontextoutputsettings.py → documentlistresponse.py} +15 -14
  56. mixpeek/models/documentoutputhandling.py +10 -0
  57. mixpeek/models/documentoutputtype.py +11 -0
  58. mixpeek/models/documentresponse.py +87 -0
  59. mixpeek/models/{nodeupdate.py → documentupdate.py} +22 -13
  60. mixpeek/models/enrichmentfield.py +25 -0
  61. mixpeek/models/errordetail.py +1 -1
  62. mixpeek/models/{update_node_v1_entities_taxonomies_nodes_node_patchop.py → execute_retriever_v1_retrievers_retrievers_retriever_id_execute_postop.py} +11 -11
  63. mixpeek/models/featureextractorconfig.py +116 -0
  64. mixpeek/models/featureextractordefinition.py +192 -0
  65. mixpeek/models/filtercondition.py +25 -25
  66. mixpeek/models/filteroperator.py +24 -0
  67. mixpeek/models/generativemodels.py +10 -0
  68. mixpeek/models/{delete_feature_v1_features_feature_id_deleteop.py → get_bucket_v1_buckets_bucket_identifier_getop.py} +5 -5
  69. mixpeek/models/{list_taxonomies_v1_entities_taxonomies_getop.py → get_cache_stats_v1_collections_cache_stats_getop.py} +16 -14
  70. mixpeek/models/get_collection_v1_collections_collection_id_getop.py +59 -0
  71. mixpeek/models/get_document_v1_collections_collection_identifier_documents_document_id_getop.py +70 -0
  72. mixpeek/models/get_feature_extractor_v1_features_extractors_feature_id_getop.py +16 -0
  73. mixpeek/models/get_interaction_v1_retrievers_interactions_interaction_id_getop.py +57 -0
  74. mixpeek/models/{delete_collection_v1_collections_collection_deleteop.py → get_object_v1_buckets_bucket_identifier_objects_object_identifier_getop.py} +16 -7
  75. mixpeek/models/get_research_v1_research_getop.py +52 -0
  76. mixpeek/models/get_retriever_stages_v1_retrievers_stages_getop.py +52 -0
  77. mixpeek/models/{delete_taxonomy_v1_entities_taxonomies_taxonomy_deleteop.py → get_retriever_v1_retrievers_retrievers_retriever_id_getop.py} +5 -7
  78. mixpeek/models/get_task_v1_tasks_task_id_getop.py +1 -1
  79. mixpeek/models/getusagerequestmodel.py +91 -0
  80. mixpeek/models/hdbscanparameters.py +26 -0
  81. mixpeek/models/interactionresponse.py +86 -0
  82. mixpeek/models/interactiontype.py +22 -0
  83. mixpeek/models/{search_features_v1_features_search_postop.py → invalidate_cache_v1_collections_cache_invalidate_postop.py} +60 -34
  84. mixpeek/models/invalidationevent.py +67 -0
  85. mixpeek/models/invalidationstrategy.py +12 -0
  86. mixpeek/models/kill_task_v1_tasks_task_id_deleteop.py +1 -1
  87. mixpeek/models/list_active_tasks_v1_tasks_getop.py +1 -1
  88. mixpeek/models/{list_assets_v1_assets_postop.py → list_buckets_v1_buckets_postop.py} +18 -18
  89. mixpeek/models/{list_features_v1_features_postop.py → list_documents_v1_collections_collection_identifier_documents_getop.py} +54 -19
  90. mixpeek/models/list_interactions_v1_retrievers_interactions_getop.py +96 -0
  91. mixpeek/models/{list_classifications_v1_entities_taxonomies_taxonomy_classifications_postop.py → list_objects_v1_buckets_bucket_identifier_objects_postop.py} +22 -29
  92. mixpeek/models/{groupbyoptionsasset.py → listbucketsrequest.py} +19 -28
  93. mixpeek/models/listbucketsresponse.py +23 -0
  94. mixpeek/models/{groupbyoptions.py → listobjectsrequest.py} +19 -28
  95. mixpeek/models/listobjectsresponse.py +25 -0
  96. mixpeek/models/listtasksresponse.py +3 -6
  97. mixpeek/models/{logicaloperator.py → logicaloperator_input.py} +42 -24
  98. mixpeek/models/logicaloperator_output.py +121 -0
  99. mixpeek/models/multivectorindex.py +25 -0
  100. mixpeek/models/namespaceresponse.py +1 -1
  101. mixpeek/models/namespaceusage.py +45 -0
  102. mixpeek/models/namingmethod.py +8 -0
  103. mixpeek/models/objectresponse.py +124 -0
  104. mixpeek/models/{db_model_paginationresponse.py → paginationresponse.py} +3 -3
  105. mixpeek/models/payloadindexconfig.py +1 -1
  106. mixpeek/models/plan.py +82 -0
  107. mixpeek/models/recomputestrategy.py +12 -0
  108. mixpeek/models/resourcetotals.py +39 -0
  109. mixpeek/models/{jsonvideooutputsettings.py → retrieverbinding.py} +19 -11
  110. mixpeek/models/{logodetectsettings.py → retrievercacheconfig.py} +20 -11
  111. mixpeek/models/retrievermodel.py +95 -0
  112. mixpeek/models/retrieverqueryrequest.py +114 -0
  113. mixpeek/models/retrieverresponse.py +24 -0
  114. mixpeek/models/searchinteraction.py +81 -0
  115. mixpeek/models/singlelineageentry.py +99 -0
  116. mixpeek/models/sortdirection.py +11 -0
  117. mixpeek/models/sortoption.py +20 -12
  118. mixpeek/models/sourceconfig_input.py +72 -0
  119. mixpeek/models/sourceconfig_output.py +75 -0
  120. mixpeek/models/sourcetype.py +11 -0
  121. mixpeek/models/stageconfig_input.py +111 -0
  122. mixpeek/models/stageconfig_output.py +114 -0
  123. mixpeek/models/stagedefinition.py +110 -0
  124. mixpeek/models/{jsonimageoutputsettings.py → stageresponse.py} +21 -11
  125. mixpeek/models/taskresponse.py +1 -1
  126. mixpeek/models/taskstatus.py +1 -0
  127. mixpeek/models/{assetfeatures.py → taxonomyapplicationconfig.py} +23 -15
  128. mixpeek/models/taxonomyconfig.py +19 -0
  129. mixpeek/models/timeseriesdatapoint.py +25 -0
  130. mixpeek/models/timeseriesusage.py +36 -0
  131. mixpeek/models/{full_asset_update_v1_assets_asset_id_putop.py → update_bucket_v1_buckets_bucket_identifier_putop.py} +9 -9
  132. mixpeek/models/{update_taxonomy_v1_entities_taxonomies_taxonomy_patchop.py → update_document_v1_collections_collection_identifier_documents_document_id_putop.py} +25 -13
  133. mixpeek/models/{full_feature_update_v1_features_feature_id_putop.py → update_object_v1_buckets_bucket_identifier_objects_object_identifier_putop.py} +20 -9
  134. mixpeek/models/updatenamespacerequest.py +1 -1
  135. mixpeek/models/updateobjectrequest.py +75 -0
  136. mixpeek/models/usageresponse.py +86 -0
  137. mixpeek/models/usagesummary.py +39 -0
  138. mixpeek/models/usagetimerange.py +16 -0
  139. mixpeek/models/usermodel_input.py +3 -0
  140. mixpeek/models/usermodel_output.py +3 -0
  141. mixpeek/models/vectorbasedconfig.py +60 -0
  142. mixpeek/models/{collectionresult.py → vectorindex.py} +20 -15
  143. mixpeek/models/vectorindexdefinition.py +43 -0
  144. mixpeek/models/vectortype.py +4 -1
  145. mixpeek/namespaces.py +26 -480
  146. mixpeek/organization_notifications.py +214 -0
  147. mixpeek/organizations.py +6 -210
  148. mixpeek/{featureextractors.py → organizations_usage.py} +31 -39
  149. mixpeek/research.py +228 -0
  150. mixpeek/retriever_interactions.py +1036 -0
  151. mixpeek/retriever_stages.py +232 -0
  152. mixpeek/{ingestassets.py → retrievers.py} +209 -271
  153. mixpeek/sdk.py +40 -23
  154. mixpeek/taxonomies.py +43 -513
  155. mixpeek/types/basemodel.py +3 -3
  156. mixpeek/utils/enums.py +67 -27
  157. {mixpeek-0.20.20.dist-info → mixpeek-0.21.1.dist-info}/METADATA +68 -50
  158. mixpeek-0.21.1.dist-info/RECORD +216 -0
  159. {mixpeek-0.20.20.dist-info → mixpeek-0.21.1.dist-info}/WHEEL +1 -1
  160. mixpeek/models/actionusage.py +0 -16
  161. mixpeek/models/assetresponse.py +0 -166
  162. mixpeek/models/assets_model_searchquery.py +0 -21
  163. mixpeek/models/assetupdate.py +0 -28
  164. mixpeek/models/assignmentconfig.py +0 -67
  165. mixpeek/models/assignmentmode.py +0 -11
  166. mixpeek/models/availablemodels.py +0 -16
  167. mixpeek/models/availablemodelsresponse.py +0 -27
  168. mixpeek/models/classificationmatch.py +0 -77
  169. mixpeek/models/classificationwithfeature.py +0 -73
  170. mixpeek/models/collectiondetailsresponse.py +0 -83
  171. mixpeek/models/dateusage.py +0 -22
  172. mixpeek/models/denseembedding.py +0 -16
  173. mixpeek/models/discoverrequest.py +0 -64
  174. mixpeek/models/embeddingconfig.py +0 -33
  175. mixpeek/models/embeddingrequest.py +0 -92
  176. mixpeek/models/embeddingresponse.py +0 -64
  177. mixpeek/models/entitysettings.py +0 -50
  178. mixpeek/models/featureoptions.py +0 -25
  179. mixpeek/models/features_model_paginationresponse.py +0 -59
  180. mixpeek/models/featureupdaterequest.py +0 -21
  181. mixpeek/models/get_asset_v1_assets_asset_id_getop.py +0 -73
  182. mixpeek/models/get_asset_with_features_v1_assets_asset_id_features_getop.py +0 -73
  183. mixpeek/models/get_collection_details_v1_collections_collection_details_getop.py +0 -59
  184. mixpeek/models/get_collection_details_v1_collections_collection_getop.py +0 -59
  185. mixpeek/models/get_feature_v1_features_feature_id_getop.py +0 -70
  186. mixpeek/models/get_taxonomy_node_v1_entities_taxonomies_nodes_node_getop.py +0 -59
  187. mixpeek/models/groupedassetdata.py +0 -18
  188. mixpeek/models/imagedescribesettings.py +0 -82
  189. mixpeek/models/imagedetectsettings.py +0 -47
  190. mixpeek/models/imagereadsettings.py +0 -71
  191. mixpeek/models/imagesettings.py +0 -92
  192. mixpeek/models/listassetsrequest.py +0 -75
  193. mixpeek/models/listassetsresponse.py +0 -22
  194. mixpeek/models/listclassificationsrequest.py +0 -69
  195. mixpeek/models/listclassificationsresponse.py +0 -31
  196. mixpeek/models/listcollectionsresponse.py +0 -22
  197. mixpeek/models/listfeaturesrequest.py +0 -77
  198. mixpeek/models/listfeaturesresponse.py +0 -22
  199. mixpeek/models/listtaxonomiesresponse.py +0 -24
  200. mixpeek/models/modality.py +0 -13
  201. mixpeek/models/modeldetails.py +0 -61
  202. mixpeek/models/nodeoptions.py +0 -16
  203. mixpeek/models/patch_namespace_v1_namespaces_namespace_patchop.py +0 -28
  204. mixpeek/models/payloadindextype.py +0 -17
  205. mixpeek/models/processimageurlinput.py +0 -87
  206. mixpeek/models/processtextinput.py +0 -82
  207. mixpeek/models/processvideourlinput.py +0 -87
  208. mixpeek/models/querysettings.py +0 -56
  209. mixpeek/models/rerankingoptions.py +0 -47
  210. mixpeek/models/search_model_searchquery.py +0 -76
  211. mixpeek/models/searchassetsrequest.py +0 -78
  212. mixpeek/models/searchrequestfeatures.py +0 -153
  213. mixpeek/models/sparseembedding.py +0 -21
  214. mixpeek/models/taxonomycreate.py +0 -20
  215. mixpeek/models/taxonomyextractionconfig.py +0 -26
  216. mixpeek/models/taxonomymodel.py +0 -27
  217. mixpeek/models/taxonomynode.py +0 -101
  218. mixpeek/models/taxonomynodecreate.py +0 -63
  219. mixpeek/models/textsettings.py +0 -67
  220. mixpeek/models/updateassetrequest.py +0 -60
  221. mixpeek/models/usage.py +0 -18
  222. mixpeek/models/vectormodel.py +0 -15
  223. mixpeek/models/videodescribesettings.py +0 -82
  224. mixpeek/models/videodetectsettings.py +0 -47
  225. mixpeek/models/videoreadsettings.py +0 -71
  226. mixpeek/models/videosettings.py +0 -115
  227. mixpeek/models/videotranscriptionsettings.py +0 -69
  228. mixpeek-0.20.20.dist-info/RECORD +0 -201
mixpeek/models/createnamespacerequest.py

@@ -1,10 +1,11 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from .basicfeatureextractor import BasicFeatureExtractor, BasicFeatureExtractorTypedDict
 from .payloadindexconfig import PayloadIndexConfig, PayloadIndexConfigTypedDict
 from mixpeek.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from pydantic import model_serializer
-from typing import List
+from typing import List, Optional
 from typing_extensions import NotRequired, TypedDict
 
 
@@ -13,8 +14,10 @@ class CreateNamespaceRequestTypedDict(TypedDict):
 
     namespace_name: str
     r"""Name of the namespace to create"""
-    embedding_models: List[str]
-    r"""List of vector indexes to be used within this namespace. Must be one of: 'image', 'openai-clip-vit-base-patch32', 'multimodal', 'vertex-multimodal', 'text', 'baai-bge-m3', 'keyword', 'naver-splade-v3'"""
+    description: NotRequired[Nullable[str]]
+    r"""Description of the namespace"""
+    feature_extractors: NotRequired[List[BasicFeatureExtractorTypedDict]]
+    r"""List of feature extractors to use"""
     payload_indexes: NotRequired[Nullable[List[PayloadIndexConfigTypedDict]]]
     r"""List of payload index configurations"""
 
@@ -25,23 +28,26 @@ class CreateNamespaceRequest(BaseModel):
     namespace_name: str
     r"""Name of the namespace to create"""
 
-    embedding_models: List[str]
-    r"""List of vector indexes to be used within this namespace. Must be one of: 'image', 'openai-clip-vit-base-patch32', 'multimodal', 'vertex-multimodal', 'text', 'baai-bge-m3', 'keyword', 'naver-splade-v3'"""
+    description: OptionalNullable[str] = UNSET
+    r"""Description of the namespace"""
+
+    feature_extractors: Optional[List[BasicFeatureExtractor]] = None
+    r"""List of feature extractors to use"""
 
     payload_indexes: OptionalNullable[List[PayloadIndexConfig]] = UNSET
    r"""List of payload index configurations"""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["payload_indexes"]
-        nullable_fields = ["payload_indexes"]
+        optional_fields = ["description", "feature_extractors", "payload_indexes"]
+        nullable_fields = ["description", "payload_indexes"]
         null_default_fields = []
 
         serialized = handler(self)
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
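
For orientation (not part of the diff): a minimal usage sketch of the reworked request, assuming CreateNamespaceRequest is re-exported from mixpeek.models as the regenerated models/__init__.py suggests. The old embedding_models list is gone; description and feature_extractors take its place.

from mixpeek.models import CreateNamespaceRequest

# Hypothetical values; namespace_name is the only required field in 0.21.1,
# description and feature_extractors are the new optional fields.
req = CreateNamespaceRequest(
    namespace_name="demo_namespace",
    description="Namespace created against the 0.21.1 SDK",
)

# The wrap-mode serializer in the hunk above prunes unset optional/nullable
# fields, so only namespace_name and description survive the dump.
print(req.model_dump())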
mixpeek/models/createobjectrequest.py (new file)

@@ -0,0 +1,75 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .createblobrequest import CreateBlobRequest, CreateBlobRequestTypedDict
+from mixpeek.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class CreateObjectRequestMetadataTypedDict(TypedDict):
+    r"""Additional metadata for the object, this will be appended in all downstream documents of the your connected collections."""
+
+
+class CreateObjectRequestMetadata(BaseModel):
+    r"""Additional metadata for the object, this will be appended in all downstream documents of the your connected collections."""
+
+
+class CreateObjectRequestTypedDict(TypedDict):
+    r"""Request model for creating a bucket object"""
+
+    key_prefix: NotRequired[Nullable[str]]
+    r"""Storage key/path prefix of the object, this will be used to retrieve the object from the storage. It's at the root of the object."""
+    blobs: NotRequired[List[CreateBlobRequestTypedDict]]
+    r"""List of blobs to be created in this object"""
+    metadata: NotRequired[CreateObjectRequestMetadataTypedDict]
+    r"""Additional metadata for the object, this will be appended in all downstream documents of the your connected collections."""
+    skip_duplicates: NotRequired[bool]
+    r"""Skip duplicate blobs, if a blob with the same hash already exists, it will be skipped."""
+
+
+class CreateObjectRequest(BaseModel):
+    r"""Request model for creating a bucket object"""
+
+    key_prefix: OptionalNullable[str] = UNSET
+    r"""Storage key/path prefix of the object, this will be used to retrieve the object from the storage. It's at the root of the object."""
+
+    blobs: Optional[List[CreateBlobRequest]] = None
+    r"""List of blobs to be created in this object"""
+
+    metadata: Optional[CreateObjectRequestMetadata] = None
+    r"""Additional metadata for the object, this will be appended in all downstream documents of the your connected collections."""
+
+    skip_duplicates: Optional[bool] = False
+    r"""Skip duplicate blobs, if a blob with the same hash already exists, it will be skipped."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["key_prefix", "blobs", "metadata", "skip_duplicates"]
+        nullable_fields = ["key_prefix"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
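
A small usage sketch (illustrative only, assuming the model is importable from mixpeek.models); every field here is optional and skip_duplicates defaults to False.

from mixpeek.models import CreateObjectRequest

# Hypothetical values; blobs stays empty here because CreateBlobRequest's
# fields live in createblobrequest.py, which is not shown in this diff.
obj = CreateObjectRequest(
    key_prefix="videos/2024/",
    blobs=[],
    skip_duplicates=True,
)
print(obj.model_dump())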
mixpeek/models/createretrieverrequest.py (new file)

@@ -0,0 +1,90 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .bucketschema_input import BucketSchemaInput, BucketSchemaInputTypedDict
+from .retrievercacheconfig import RetrieverCacheConfig, RetrieverCacheConfigTypedDict
+from .stageconfig_input import StageConfigInput, StageConfigInputTypedDict
+from mixpeek.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class CreateRetrieverRequestMetadataTypedDict(TypedDict):
+    pass
+
+
+class CreateRetrieverRequestMetadata(BaseModel):
+    pass
+
+
+class CreateRetrieverRequestTypedDict(TypedDict):
+    r"""Request to create a new retriever"""
+
+    retriever_name: str
+    r"""Name of the retriever"""
+    input_schema: BucketSchemaInputTypedDict
+    r"""Schema definition for bucket objects"""
+    collection_ids: List[str]
+    r"""List of collection IDs to search in"""
+    stages: List[StageConfigInputTypedDict]
+    r"""List of stages to execute in order"""
+    description: NotRequired[Nullable[str]]
+    r"""Description of the retriever"""
+    metadata: NotRequired[CreateRetrieverRequestMetadataTypedDict]
+    cache_config: NotRequired[Nullable[RetrieverCacheConfigTypedDict]]
+    r"""Configuration for retriever-level caching"""
+
+
+class CreateRetrieverRequest(BaseModel):
+    r"""Request to create a new retriever"""
+
+    retriever_name: str
+    r"""Name of the retriever"""
+
+    input_schema: BucketSchemaInput
+    r"""Schema definition for bucket objects"""
+
+    collection_ids: List[str]
+    r"""List of collection IDs to search in"""
+
+    stages: List[StageConfigInput]
+    r"""List of stages to execute in order"""
+
+    description: OptionalNullable[str] = UNSET
+    r"""Description of the retriever"""
+
+    metadata: Optional[CreateRetrieverRequestMetadata] = None
+
+    cache_config: OptionalNullable[RetrieverCacheConfig] = UNSET
+    r"""Configuration for retriever-level caching"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["description", "metadata", "cache_config"]
+        nullable_fields = ["description", "cache_config"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
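
Since BucketSchemaInput and StageConfigInput are defined in other files not shown here, the sketch below spells out the wire shape of CreateRetrieverRequestTypedDict as a plain dict with placeholder bodies (illustrative only).

# Hypothetical request body matching CreateRetrieverRequestTypedDict.
retriever_request = {
    "retriever_name": "product_search",
    "input_schema": {},    # BucketSchemaInputTypedDict (fields not shown in this diff)
    "collection_ids": ["col_123"],
    "stages": [],          # StageConfigInputTypedDict entries (fields not shown)
    "description": "Two-stage retriever over product collections",
}

metadata and cache_config are optional and can be omitted entirely, as can description.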
mixpeek/models/{taxonomyupdate.py → createtaxonomyrequest.py}

@@ -1,40 +1,42 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from .taxonomyconfig import TaxonomyConfig, TaxonomyConfigTypedDict
 from mixpeek.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from pydantic import model_serializer
 from typing_extensions import NotRequired, TypedDict
 
 
-class TaxonomyUpdateTypedDict(TypedDict):
-    r"""Model for updating specific taxonomy fields"""
+class CreateTaxonomyRequestTypedDict(TypedDict):
+    r"""Request to create a new taxonomy"""
 
-    taxonomy_name: NotRequired[Nullable[str]]
-    r"""Updated taxonomy name (must not contain spaces or special characters)"""
+    taxonomy_name: str
+    config: TaxonomyConfigTypedDict
+    r"""Base configuration for all taxonomy types"""
     description: NotRequired[Nullable[str]]
-    r"""Updated taxonomy description"""
 
 
-class TaxonomyUpdate(BaseModel):
-    r"""Model for updating specific taxonomy fields"""
+class CreateTaxonomyRequest(BaseModel):
+    r"""Request to create a new taxonomy"""
 
-    taxonomy_name: OptionalNullable[str] = UNSET
-    r"""Updated taxonomy name (must not contain spaces or special characters)"""
+    taxonomy_name: str
+
+    config: TaxonomyConfig
+    r"""Base configuration for all taxonomy types"""
 
     description: OptionalNullable[str] = UNSET
-    r"""Updated taxonomy description"""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["taxonomy_name", "description"]
-        nullable_fields = ["taxonomy_name", "description"]
+        optional_fields = ["description"]
+        nullable_fields = ["description"]
         null_default_fields = []
 
         serialized = handler(self)
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
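
Creation now takes a name plus a config block rather than the old TaxonomyUpdate-style nullable fields; a dict-shaped sketch of CreateTaxonomyRequestTypedDict, with the TaxonomyConfig body left as a placeholder because its fields are defined elsewhere (illustrative only).

# Hypothetical payload matching CreateTaxonomyRequestTypedDict.
taxonomy_request = {
    "taxonomy_name": "content_categories",
    "config": {},    # TaxonomyConfigTypedDict (fields not shown in this diff)
    "description": "Demo taxonomy",
}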
mixpeek/models/{delete_asset_v1_assets_asset_id_deleteop.py → delete_bucket_v1_buckets_bucket_identifier_deleteop.py}

@@ -8,14 +8,14 @@ from pydantic import model_serializer
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class DeleteAssetV1AssetsAssetIDDeleteRequestTypedDict(TypedDict):
-    asset_id: str
+class DeleteBucketV1BucketsBucketIdentifierDeleteRequestTypedDict(TypedDict):
+    bucket_identifier: str
     x_namespace: NotRequired[Nullable[str]]
     r"""Optional namespace for data isolation. This can be a namespace name or namespace ID. Example: 'netflix_prod' or 'ns_1234567890'. To create a namespace, use the /namespaces endpoint."""
 
 
-class DeleteAssetV1AssetsAssetIDDeleteRequest(BaseModel):
-    asset_id: Annotated[
+class DeleteBucketV1BucketsBucketIdentifierDeleteRequest(BaseModel):
+    bucket_identifier: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
 
@@ -36,7 +36,7 @@ class DeleteAssetV1AssetsAssetIDDeleteRequest(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mixpeek/models/{delete_classifications_v1_entities_taxonomies_taxonomy_classifications_classification_id_deleteop.py → delete_document_v1_collections_collection_identifier_documents_document_id_deleteop.py}

@@ -8,29 +8,29 @@ from pydantic import model_serializer
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class DeleteClassificationsV1EntitiesTaxonomiesTaxonomyClassificationsClassificationIDDeleteRequestTypedDict(
+class DeleteDocumentV1CollectionsCollectionIdentifierDocumentsDocumentIDDeleteRequestTypedDict(
     TypedDict
 ):
-    taxonomy: str
-    r"""The id or name of the taxonomy"""
-    classification_id: str
-    r"""The id of the classification to delete"""
+    collection_identifier: str
+    r"""The ID of the collection"""
+    document_id: str
+    r"""The ID of the document to delete"""
     x_namespace: NotRequired[Nullable[str]]
     r"""Optional namespace for data isolation. This can be a namespace name or namespace ID. Example: 'netflix_prod' or 'ns_1234567890'. To create a namespace, use the /namespaces endpoint."""
 
 
-class DeleteClassificationsV1EntitiesTaxonomiesTaxonomyClassificationsClassificationIDDeleteRequest(
+class DeleteDocumentV1CollectionsCollectionIdentifierDocumentsDocumentIDDeleteRequest(
     BaseModel
 ):
-    taxonomy: Annotated[
+    collection_identifier: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
-    r"""The id or name of the taxonomy"""
+    r"""The ID of the collection"""
 
-    classification_id: Annotated[
+    document_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
-    r"""The id of the classification to delete"""
+    r"""The ID of the document to delete"""
 
     x_namespace: Annotated[
         OptionalNullable[str],
@@ -49,7 +49,7 @@ class DeleteClassificationsV1EntitiesTaxonomiesTaxonomyClassificationsClassifica
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mixpeek/models/{get_taxonomy_v1_entities_taxonomies_taxonomy_getop.py → delete_interaction_v1_retrievers_interactions_interaction_id_deleteop.py}

@@ -8,18 +8,18 @@ from pydantic import model_serializer
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class GetTaxonomyV1EntitiesTaxonomiesTaxonomyGetRequestTypedDict(TypedDict):
-    taxonomy: str
-    r"""The name or id of the taxonomy to find"""
+class DeleteInteractionV1RetrieversInteractionsInteractionIDDeleteRequestTypedDict(
+    TypedDict
+):
+    interaction_id: str
     x_namespace: NotRequired[Nullable[str]]
     r"""Optional namespace for data isolation. This can be a namespace name or namespace ID. Example: 'netflix_prod' or 'ns_1234567890'. To create a namespace, use the /namespaces endpoint."""
 
 
-class GetTaxonomyV1EntitiesTaxonomiesTaxonomyGetRequest(BaseModel):
-    taxonomy: Annotated[
+class DeleteInteractionV1RetrieversInteractionsInteractionIDDeleteRequest(BaseModel):
+    interaction_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
-    r"""The name or id of the taxonomy to find"""
 
     x_namespace: Annotated[
         OptionalNullable[str],
@@ -38,7 +38,7 @@ class GetTaxonomyV1EntitiesTaxonomiesTaxonomyGetRequest(BaseModel):
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mixpeek/models/delete_object_v1_buckets_bucket_identifier_objects_object_identifier_deleteop.py (new file)

@@ -0,0 +1,70 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mixpeek.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from mixpeek.utils import FieldMetadata, HeaderMetadata, PathParamMetadata
+import pydantic
+from pydantic import model_serializer
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class DeleteObjectV1BucketsBucketIdentifierObjectsObjectIdentifierDeleteRequestTypedDict(
+    TypedDict
+):
+    bucket_identifier: str
+    r"""Identifier of the bucket"""
+    object_identifier: str
+    r"""Identifier of the object"""
+    x_namespace: NotRequired[Nullable[str]]
+    r"""Optional namespace for data isolation. This can be a namespace name or namespace ID. Example: 'netflix_prod' or 'ns_1234567890'. To create a namespace, use the /namespaces endpoint."""
+
+
+class DeleteObjectV1BucketsBucketIdentifierObjectsObjectIdentifierDeleteRequest(
+    BaseModel
+):
+    bucket_identifier: Annotated[
+        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+    ]
+    r"""Identifier of the bucket"""
+
+    object_identifier: Annotated[
+        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+    ]
+    r"""Identifier of the object"""
+
+    x_namespace: Annotated[
+        OptionalNullable[str],
+        pydantic.Field(alias="X-Namespace"),
+        FieldMetadata(header=HeaderMetadata(style="simple", explode=False)),
+    ] = UNSET
+    r"""Optional namespace for data isolation. This can be a namespace name or namespace ID. Example: 'netflix_prod' or 'ns_1234567890'. To create a namespace, use the /namespaces endpoint."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["X-Namespace"]
+        nullable_fields = ["X-Namespace"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
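
Both path parameters are plain strings and X-Namespace is an optional header, so the generated request model can be built directly. A sketch with hypothetical identifiers, assuming the class is re-exported from mixpeek.models and that the SDK's BaseModel allows population by field name, as Speakeasy-generated models typically do.

from mixpeek.models import (
    DeleteObjectV1BucketsBucketIdentifierObjectsObjectIdentifierDeleteRequest,
)

# Hypothetical identifiers; x_namespace serializes under its X-Namespace alias.
req = DeleteObjectV1BucketsBucketIdentifierObjectsObjectIdentifierDeleteRequest(
    bucket_identifier="bkt_123",
    object_identifier="obj_456",
    x_namespace="netflix_prod",
)
print(req.model_dump(by_alias=True))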
mixpeek/models/{featureextractionembeddingrequest.py → documenthandlingconfig.py}

@@ -1,39 +1,36 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from .inputtype import InputType
-from .vectormodel import VectorModel
+from .documentoutputtype import DocumentOutputType
 from mixpeek.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from pydantic import model_serializer
 from typing_extensions import NotRequired, TypedDict
 
 
-class FeatureExtractionEmbeddingRequestTypedDict(TypedDict):
-    type: InputType
-    embedding_model: VectorModel
-    value: NotRequired[Nullable[str]]
-    r"""The input content to embed. Could be a URL, text content, file path, or base64 encoded string"""
+class DocumentHandlingConfigTypedDict(TypedDict):
+    r"""Configuration for how documents are handled during processing and updates"""
 
+    output_type: NotRequired[Nullable[DocumentOutputType]]
+    r"""How this extractor produces documents (single or multiple)"""
 
-class FeatureExtractionEmbeddingRequest(BaseModel):
-    type: InputType
 
-    embedding_model: VectorModel
+class DocumentHandlingConfig(BaseModel):
+    r"""Configuration for how documents are handled during processing and updates"""
 
-    value: OptionalNullable[str] = UNSET
-    r"""The input content to embed. Could be a URL, text content, file path, or base64 encoded string"""
+    output_type: OptionalNullable[DocumentOutputType] = UNSET
+    r"""How this extractor produces documents (single or multiple)"""
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["value"]
-        nullable_fields = ["value"]
+        optional_fields = ["output_type"]
+        nullable_fields = ["output_type"]
         null_default_fields = []
 
         serialized = handler(self)
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
mixpeek/models/documentinputhandling.py (new file)

@@ -0,0 +1,11 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from enum import Enum
+
+
+class DocumentInputHandling(str, Enum):
+    r"""How documents are provided to the feature extractor"""
+
+    INDIVIDUAL = "individual"
+    GROUPED = "grouped"
mixpeek/models/{jsontextoutputsettings.py → documentlistresponse.py}

@@ -1,40 +1,41 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
+from .documentresponse import DocumentResponse, DocumentResponseTypedDict
 from mixpeek.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
 from pydantic import model_serializer
+from typing import List
 from typing_extensions import NotRequired, TypedDict
 
 
-class ResponseShapeTypedDict(TypedDict):
-    pass
+class DocumentListResponseTypedDict(TypedDict):
+    r"""Model for paginated document list response"""
 
+    items: List[DocumentResponseTypedDict]
+    total: int
+    next_offset_id: NotRequired[Nullable[str]]
 
-class ResponseShape(BaseModel):
-    pass
 
+class DocumentListResponse(BaseModel):
+    r"""Model for paginated document list response"""
 
-class JSONTextOutputSettingsTypedDict(TypedDict):
-    response_shape: NotRequired[Nullable[ResponseShapeTypedDict]]
-    prompt: NotRequired[Nullable[str]]
+    items: List[DocumentResponse]
 
+    total: int
 
-class JSONTextOutputSettings(BaseModel):
-    response_shape: OptionalNullable[ResponseShape] = UNSET
-
-    prompt: OptionalNullable[str] = UNSET
+    next_offset_id: OptionalNullable[str] = UNSET
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["response_shape", "prompt"]
-        nullable_fields = ["response_shape", "prompt"]
+        optional_fields = ["next_offset_id"]
+        nullable_fields = ["next_offset_id"]
         null_default_fields = []
 
         serialized = handler(self)
 
         m = {}
 
-        for n, f in self.model_fields.items():
+        for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
             serialized.pop(k, None)
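
The paginated shape, sketched as a plain dict; DocumentResponse is defined in another file, so the item body below is a placeholder (illustrative only).

# Hypothetical paginated payload matching DocumentListResponseTypedDict.
document_list = {
    "items": [{}],           # DocumentResponseTypedDict entries (fields not shown)
    "total": 1,
    "next_offset_id": None,  # nullable cursor for the next page
}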
mixpeek/models/documentoutputhandling.py (new file)

@@ -0,0 +1,10 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from enum import Enum
+
+
+class DocumentOutputHandling(str, Enum):
+    r"""Enum for document output handling types"""
+
+    CREATE_NEW = "create_new"
mixpeek/models/documentoutputtype.py (new file)

@@ -0,0 +1,11 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from enum import Enum
+
+
+class DocumentOutputType(str, Enum):
+    r"""Enum for document output types"""
+
+    SINGLE = "single"
+    MULTIPLE = "multiple"
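
These output enums feed the DocumentHandlingConfig model shown earlier; a minimal sketch, assuming both names are re-exported from mixpeek.models.

from mixpeek.models import DocumentHandlingConfig, DocumentOutputType

# output_type is optional and nullable; UNSET values are dropped on dump.
config = DocumentHandlingConfig(output_type=DocumentOutputType.MULTIPLE)
print(config.model_dump(mode="json"))  # {'output_type': 'multiple'}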