mlrun-1.8.0rc2-py3-none-any.whl → mlrun-1.8.0rc3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release has been flagged as potentially problematic.

@@ -23,6 +23,7 @@ from .base import (
     get_artifact_meta,
 )
 from .dataset import DatasetArtifact, TableArtifact, update_dataset_meta
+from .document import DocumentArtifact, DocumentLoader, DocumentLoaderSpec
 from .manager import (
     ArtifactManager,
     ArtifactProducer,
@@ -0,0 +1,313 @@
+# Copyright 2024 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import ast
+import re
+import tempfile
+from collections.abc import Iterator
+from copy import deepcopy
+from importlib import import_module
+from typing import Optional, Union
+
+import mlrun
+from mlrun.artifacts import Artifact, ArtifactSpec
+from mlrun.model import ModelObj
+
+from ..utils import generate_artifact_uri
+
+
+class DocumentLoaderSpec(ModelObj):
+    """
+    A specification for loading a document from a source path using a given loader class.
+
+    The loader class is dynamically imported and instantiated with the provided
+    arguments. The loaded documents can optionally be uploaded as artifacts.
+
+    Attributes:
+        loader_class_name (str): The name of the loader class to use for loading documents.
+        src_name (str): The name of the source attribute to pass to the loader class.
+        kwargs (Optional[dict]): Additional keyword arguments to pass to the loader class.
+
+    Methods:
+        make_loader(src_path): Creates an instance of the loader class with the specified source path.
+    """
+
+    _dict_fields = ["loader_class_name", "src_name", "kwargs"]
+
+    def __init__(
+        self,
+        loader_class_name: str = "langchain_community.document_loaders.TextLoader",
+        src_name: str = "file_path",
+        kwargs: Optional[dict] = None,
+    ):
+        """
+        Initialize the document loader spec.
+
+        Args:
+            loader_class_name (str): The name of the loader class to use.
+            src_name (str): The source name for the document.
+            kwargs (Optional[dict]): Additional keyword arguments to pass to the loader class.
+        """
+        self.loader_class_name = loader_class_name
+        self.src_name = src_name
+        self.kwargs = kwargs
+
+    def make_loader(self, src_path):
+        module_name, class_name = self.loader_class_name.rsplit(".", 1)
+        module = import_module(module_name)
+        loader_class = getattr(module, class_name)
+        kwargs = deepcopy(self.kwargs or {})
+        kwargs[self.src_name] = src_path
+        loader = loader_class(**kwargs)
+        return loader
+
+
+class DocumentLoader:
+    """
+    A factory class for creating instances of a dynamically defined document loader.
+
+    Args:
+        source_path (str): The source path of the document to be loaded.
+        loader_spec (DocumentLoaderSpec): Specification for the document loader.
+        artifact_key (str): The key for the artifact to be logged. It can include '%%',
+            which will be replaced by a hex-encoded version of the source path.
+        producer (Optional[Union[MlrunProject, str, MLClientCtx]]): The producer of the
+            document: a project, a project name, or a run context.
+        upload (bool): Whether to upload the document.
+
+    Returns:
+        DynamicDocumentLoader: An instance of a dynamically defined subclass of BaseLoader.
+    """
+
+    def __new__(
+        cls,
+        source_path: str,
+        loader_spec: "DocumentLoaderSpec",
+        artifact_key="doc%%",
+        producer: Optional[Union["MlrunProject", str, "MLClientCtx"]] = None,  # noqa: F821
+        upload: bool = False,
+    ):
+        # Dynamically import BaseLoader
+        from langchain_community.document_loaders.base import BaseLoader
+
+        class DynamicDocumentLoader(BaseLoader):
+            def __init__(
+                self,
+                source_path,
+                loader_spec,
+                artifact_key,
+                producer,
+                upload,
+            ):
+                self.producer = producer
+                self.artifact_key = (
+                    DocumentLoader.artifact_key_instance(artifact_key, source_path)
+                    if "%%" in artifact_key
+                    else artifact_key
+                )
+                self.loader_spec = loader_spec
+                self.source_path = source_path
+                self.upload = upload
+
+                # Resolve the producer
+                if not self.producer:
+                    self.producer = mlrun.mlconf.default_project
+                if isinstance(self.producer, str):
+                    self.producer = mlrun.get_or_create_project(self.producer)
+
+            def lazy_load(self) -> Iterator["Document"]:  # noqa: F821
+                artifact = self.producer.log_document(
+                    key=self.artifact_key,
+                    document_loader=self.loader_spec,
+                    src_path=self.source_path,
+                    upload=self.upload,
+                )
+                yield from artifact.to_langchain_documents()
+
+        # Return an instance of the dynamically defined subclass
+        instance = DynamicDocumentLoader(
+            artifact_key=artifact_key,
+            source_path=source_path,
+            loader_spec=loader_spec,
+            producer=producer,
+            upload=upload,
+        )
+        return instance
+
+    @staticmethod
+    def artifact_key_instance(artifact_key: str, src_path: str) -> str:
+        if "%%" in artifact_key:
+            pattern = mlrun.utils.regex.artifact_key[0]
+            # Convert anchored pattern (^...$) to non-anchored version for finditer
+            search_pattern = pattern.strip("^$")
+            result = []
+            current_pos = 0
+
+            # Find all valid sequences
+            for match in re.finditer(search_pattern, src_path):
+                # Add hex values for characters between matches
+                for char in src_path[current_pos : match.start()]:
+                    result.append(hex(ord(char))[2:].zfill(2))
+
+                # Add the valid sequence
+                result.append(match.group())
+                current_pos = match.end()
+
+            # Handle any remaining characters after the last match
+            for char in src_path[current_pos:]:
+                result.append(hex(ord(char))[2:].zfill(2))
+
+            resolved_path = "".join(result)
+
+            artifact_key = artifact_key.replace("%%", resolved_path)
+
+        return artifact_key
+
+
+class DocumentArtifact(Artifact):
+    """
+    A specific artifact class, inheriting from the generic artifact, used to maintain document metadata.
+
+    Methods:
+        to_langchain_documents(splitter): Create LangChain documents from the artifact.
+        collection_add(collection_id): Add a collection ID to the artifact.
+        collection_remove(collection_id): Remove a collection ID from the artifact.
+    """
+
+    class DocumentArtifactSpec(ArtifactSpec):
+        _dict_fields = ArtifactSpec._dict_fields + [
+            "document_loader",
+            "collections",
+            "original_source",
+        ]
+
+        def __init__(
+            self,
+            *args,
+            **kwargs,
+        ):
+            super().__init__(*args, **kwargs)
+            self.document_loader = None
+            self.collections = set()
+            self.original_source = None
+
+    kind = "document"
+
+    METADATA_SOURCE_KEY = "source"
+    METADATA_ORIGINAL_SOURCE_KEY = "original_source"
+    METADATA_CHUNK_KEY = "mlrun_chunk"
+    METADATA_ARTIFACT_URI_KEY = "mlrun_object_uri"
+    METADATA_ARTIFACT_TARGET_PATH_KEY = "mlrun_target_path"
+
+    def __init__(
+        self,
+        key=None,
+        document_loader: DocumentLoaderSpec = DocumentLoaderSpec(),
+        **kwargs,
+    ):
+        super().__init__(key, **kwargs)
+        self.spec.document_loader = document_loader.to_str()
+        if "src_path" in kwargs:
+            self.spec.original_source = kwargs["src_path"]
+
+    @property
+    def spec(self) -> DocumentArtifactSpec:
+        return self._spec
+
+    @spec.setter
+    def spec(self, spec):
+        self._spec = self._verify_dict(
+            spec, "spec", DocumentArtifact.DocumentArtifactSpec
+        )
+        # _verify_dict doesn't handle sets, so convert the string form back
+        if isinstance(self._spec.collections, str):
+            self._spec.collections = ast.literal_eval(self._spec.collections)
+
+    @property
+    def inputs(self):
+        # Keeps the interface consistent with project.update_artifact() when updating
+        return None
+
+    @property
+    def source(self):
+        return generate_artifact_uri(self.metadata.project, self.spec.db_key)
+
+    def to_langchain_documents(
+        self,
+        splitter: Optional["TextSplitter"] = None,  # noqa: F821
+    ) -> list["Document"]:  # noqa: F821
+        """
+        Create LangChain documents from the artifact.
+
+        Args:
+            splitter (Optional[TextSplitter]): A LangChain TextSplitter to split the document into chunks.
+
+        Returns:
+            list[Document]: A list of LangChain Document objects.
+        """
+        from langchain.schema import Document
+
+        dictionary = ast.literal_eval(self.spec.document_loader)
+        loader_spec = DocumentLoaderSpec.from_dict(dictionary)
+
+        if self.get_target_path():
+            with tempfile.NamedTemporaryFile() as tmp_file:
+                mlrun.datastore.store_manager.object(
+                    url=self.get_target_path()
+                ).download(tmp_file.name)
+                loader = loader_spec.make_loader(tmp_file.name)
+                documents = loader.load()
+        elif self.src_path:
+            loader = loader_spec.make_loader(self.src_path)
+            documents = loader.load()
+        else:
+            raise ValueError(
+                "No src_path or target_path provided. Cannot load document."
+            )
+
+        results = []
+        for document in documents:
+            if splitter:
+                texts = splitter.split_text(document.page_content)
+            else:
+                texts = [document.page_content]
+
+            metadata = document.metadata
+
+            metadata[self.METADATA_ORIGINAL_SOURCE_KEY] = self.src_path
+            metadata[self.METADATA_SOURCE_KEY] = self.source
+            metadata[self.METADATA_ARTIFACT_URI_KEY] = self.uri
+            if self.get_target_path():
+                metadata[self.METADATA_ARTIFACT_TARGET_PATH_KEY] = (
+                    self.get_target_path()
+                )
+
+            for idx, text in enumerate(texts):
+                # Copy the metadata per chunk so each Document gets its own dict
+                doc = Document(
+                    page_content=text,
+                    metadata={**metadata, self.METADATA_CHUNK_KEY: str(idx)},
+                )
+                results.append(doc)
+        return results
+
+    def collection_add(self, collection_id: str) -> None:
+        self.spec.collections.add(collection_id)
+
+    def collection_remove(self, collection_id: str) -> None:
+        self.spec.collections.discard(collection_id)
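
As a quick orientation, here is a minimal usage sketch of the new loader classes. It assumes langchain_community is installed and that they are re-exported from mlrun.artifacts (as the import hunk above suggests); the project name and "notes.txt" file are hypothetical.

    import mlrun
    from mlrun.artifacts import DocumentLoader, DocumentLoaderSpec

    project = mlrun.get_or_create_project("doc-demo")  # hypothetical project name

    # Serializable description of how to (re)load the document
    spec = DocumentLoaderSpec(
        loader_class_name="langchain_community.document_loaders.TextLoader",
        src_name="file_path",
    )

    # The factory returns a langchain BaseLoader subclass that also logs a
    # DocumentArtifact via producer.log_document()
    loader = DocumentLoader(
        source_path="notes.txt",  # hypothetical local file
        loader_spec=spec,
        artifact_key="doc%%",  # '%%' becomes a hex-encoded form of the source path
        producer=project,
    )
    docs = loader.load()  # standard BaseLoader API, returns langchain Documents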
@@ -41,6 +41,7 @@ from .dataset import (
     DatasetArtifact,
     TableArtifact,
 )
+from .document import DocumentArtifact
 from .model import ModelArtifact
 from .plots import (
     PlotArtifact,
@@ -57,6 +58,7 @@ artifact_types = {
     "model": ModelArtifact,
     "dataset": DatasetArtifact,
     "plotly": PlotlyArtifact,
+    "document": DocumentArtifact,
 }


@@ -62,6 +62,7 @@ from .clusterization_spec import (
 from .common import ImageBuilder
 from .constants import (
     APIStates,
+    ArtifactPartitionByField,
     ClusterizationRole,
     DeletionStrategy,
     FeatureStorePartitionByField,
@@ -133,6 +133,21 @@ class RunPartitionByField(mlrun.common.types.StrEnum):
     )


+class ArtifactPartitionByField(mlrun.common.types.StrEnum):
+    name = "name"  # Supported for artifact objects
+    project_and_name = "project_and_name"  # Supported for artifact objects
+
+    def to_partition_by_db_field(self, db_cls):
+        if self.value == ArtifactPartitionByField.name:
+            return db_cls.key
+        elif self.value == ArtifactPartitionByField.project_and_name:
+            return db_cls.project, db_cls.key
+        else:
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                f"Unknown group by field: {self.value}"
+            )
+
+
 class SortField(mlrun.common.types.StrEnum):
     created = "created"
     updated = "updated"
mlrun/config.py CHANGED
@@ -533,7 +533,7 @@ default_config = {
         "verbose": True,
     },
     "pagination": {
-        "default_page_size": 20,
+        "default_page_size": 200,
         "pagination_cache": {
            "interval": 60,
            "ttl": 3600,
@@ -81,6 +81,24 @@ class DatastoreProfileBasic(DatastoreProfile):
     private: typing.Optional[str] = None


+class VectorStoreProfile(DatastoreProfile):
+    type: str = pydantic.Field("vector")
+    _private_attributes = ("kwargs_private",)
+    vector_store_class: str
+    kwargs_public: typing.Optional[dict] = None
+    kwargs_private: typing.Optional[dict] = None
+
+    def attributes(self, kwargs=None):
+        attributes = {}
+        if self.kwargs_public:
+            attributes = merge(attributes, self.kwargs_public)
+        if self.kwargs_private:
+            attributes = merge(attributes, self.kwargs_private)
+        if kwargs:
+            attributes = merge(attributes, kwargs)
+        return attributes
+
+
 class DatastoreProfileKafkaTarget(DatastoreProfile):
     type: str = pydantic.v1.Field("kafka_target")
     _private_attributes = "kwargs_private"
@@ -476,6 +494,7 @@ class DatastoreProfile2Json(pydantic.v1.BaseModel):
         "gcs": DatastoreProfileGCS,
         "az": DatastoreProfileAzureBlob,
         "hdfs": DatastoreProfileHdfs,
+        "vector": VectorStoreProfile,
     }
     if datastore_type in ds_profile_factory:
         return ds_profile_factory[datastore_type].parse_obj(decoded_dict)
@@ -0,0 +1,186 @@
+# Copyright 2024 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from importlib import import_module
+from typing import Union
+
+from mlrun.artifacts import DocumentArtifact
+
+
+class VectorStoreCollection:
+    """
+    Manages a vector store collection, providing methods to add and delete
+    documents and artifacts and to interact with an MLRun context.
+
+    Attributes:
+        _collection_impl (object): The underlying collection implementation.
+        _mlrun_context (Union[MlrunProject, MLClientCtx]): The MLRun context associated with the collection.
+        collection_name (str): The name of the collection.
+        id (str): The unique identifier of the collection, composed of the datastore profile and collection name.
+
+    Methods:
+        add_documents(documents, **kwargs):
+            Adds a list of documents to the collection and, when an MLRun context is
+            present, updates the MLRun artifacts associated with the documents.
+
+        add_artifacts(artifacts, splitter=None, **kwargs):
+            Adds a list of DocumentArtifact objects to the collection, optionally using
+            a splitter to convert artifacts to documents.
+
+        remove_itself_from_artifact(artifact):
+            Removes the current collection from the given artifact and updates the artifact.
+
+        delete_artifacts(artifacts):
+            Deletes a list of DocumentArtifact objects from the collection and updates
+            the MLRun context. Raises NotImplementedError if the delete operation is
+            not supported for the collection implementation.
+    """
+
+    def __init__(
+        self,
+        vector_store_class: str,
+        mlrun_context: Union["MlrunProject", "MLClientCtx"],  # noqa: F821
+        datastore_profile: str,
+        collection_name: str,
+        **kwargs,
+    ):
+        # Import the vector store class dynamically
+        module_name, class_name = vector_store_class.rsplit(".", 1)
+        module = import_module(module_name)
+        vector_store_class = getattr(module, class_name)
+
+        signature = inspect.signature(vector_store_class)
+
+        # Create the vector store instance
+        if "collection_name" in signature.parameters.keys():
+            vector_store = vector_store_class(collection_name=collection_name, **kwargs)
+        else:
+            vector_store = vector_store_class(**kwargs)
+
+        self._collection_impl = vector_store
+        self._mlrun_context = mlrun_context
+        self.collection_name = collection_name
+        self.id = datastore_profile + "/" + collection_name
+
+    def __getattr__(self, name):
+        # Called only when an attribute is not found in the usual places;
+        # forward the attribute access to _collection_impl
+        return getattr(self._collection_impl, name)
+
+    def __setattr__(self, name, value):
+        if name in ["_collection_impl", "_mlrun_context"] or name in self.__dict__:
+            # Use the base class method to avoid recursion
+            super().__setattr__(name, value)
+        else:
+            # Forward the attribute setting to _collection_impl
+            setattr(self._collection_impl, name, value)
+
+    def add_documents(
+        self,
+        documents: list["Document"],  # noqa: F821
+        **kwargs,
+    ):
+        """
+        Add a list of documents to the collection.
+
+        If the instance has an MLRun context, it will update the MLRun artifacts
+        associated with the documents.
+
+        Args:
+            documents (list[Document]): A list of Document objects to be added.
+            **kwargs: Additional keyword arguments to be passed to the underlying
+                collection implementation.
+
+        Returns:
+            The result of the underlying collection implementation's add_documents method.
+        """
+        if self._mlrun_context:
+            for document in documents:
+                mlrun_uri = document.metadata.get(
+                    DocumentArtifact.METADATA_ARTIFACT_URI_KEY
+                )
+                if mlrun_uri:
+                    artifact = self._mlrun_context.get_store_resource(mlrun_uri)
+                    artifact.collection_add(self.id)
+                    self._mlrun_context.update_artifact(artifact)
+        return self._collection_impl.add_documents(documents, **kwargs)
+
+    def add_artifacts(self, artifacts: list[DocumentArtifact], splitter=None, **kwargs):
+        """
+        Add a list of DocumentArtifact objects to the collection.
+
+        Args:
+            artifacts (list[DocumentArtifact]): A list of DocumentArtifact objects to be added.
+            splitter (optional): An optional splitter to be used when converting artifacts to documents.
+            **kwargs: Additional keyword arguments to be passed to the collection's add_documents method.
+
+        Returns:
+            list: A list of IDs of the added documents.
+        """
+        all_ids = []
+        for artifact in artifacts:
+            documents = artifact.to_langchain_documents(splitter)
+            artifact.collection_add(self.id)
+            self._mlrun_context.update_artifact(artifact)
+            ids = self._collection_impl.add_documents(documents, **kwargs)
+            all_ids.extend(ids)
+        return all_ids
+
+    def remove_itself_from_artifact(self, artifact: DocumentArtifact):
+        """
+        Remove the current collection from the given artifact and update the artifact.
+
+        Args:
+            artifact (DocumentArtifact): The artifact from which the collection should be removed.
+        """
+        artifact.collection_remove(self.id)
+        self._mlrun_context.update_artifact(artifact)
+
+    def delete_artifacts(self, artifacts: list[DocumentArtifact]):
+        """
+        Delete a list of DocumentArtifact objects from the collection.
+
+        This method removes the specified artifacts from the collection and updates
+        the MLRun context. The deletion process varies depending on the type of the
+        underlying collection implementation.
+
+        Args:
+            artifacts (list[DocumentArtifact]): A list of DocumentArtifact objects to be deleted.
+
+        Raises:
+            NotImplementedError: If the delete operation is not supported for the collection implementation.
+        """
+        store_class = self._collection_impl.__class__.__name__.lower()
+        for artifact in artifacts:
+            artifact.collection_remove(self.id)
+            self._mlrun_context.update_artifact(artifact)
+            # Delete this artifact's documents from the underlying store
+            if store_class == "milvus":
+                expr = f"{DocumentArtifact.METADATA_SOURCE_KEY} == '{artifact.source}'"
+                self._collection_impl.delete(expr=expr)
+            elif store_class == "chroma":
+                where = {DocumentArtifact.METADATA_SOURCE_KEY: artifact.source}
+                self._collection_impl.delete(where=where)
+            elif (
+                hasattr(self._collection_impl, "delete")
+                and "filter"
+                in inspect.signature(self._collection_impl.delete).parameters
+            ):
+                filter = {
+                    "metadata": {DocumentArtifact.METADATA_SOURCE_KEY: artifact.source}
+                }
+                self._collection_impl.delete(filter=filter)
+            else:
+                raise NotImplementedError(
+                    f"delete_artifacts() operation not supported for {store_class}"
+                )
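
A sketch of wiring the wrapper to a Chroma store; the module path is assumed (this diff shows only the file's contents, not its location), and the project, profile, and file names are illustrative. Extra kwargs are forwarded to the vector store constructor.

    import mlrun
    from mlrun.datastore.vectorstore import VectorStoreCollection  # assumed path

    project = mlrun.get_or_create_project("doc-demo")  # hypothetical

    collection = VectorStoreCollection(
        vector_store_class="langchain_community.vectorstores.Chroma",
        mlrun_context=project,
        datastore_profile="my-vectors",
        collection_name="docs",
    )

    # Converts each artifact to langchain Documents, records the collection
    # membership on the artifact, and adds the documents to the store
    artifact = project.log_document("doc1", src_path="notes.txt")  # hypothetical file
    ids = collection.add_artifacts([artifact])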
mlrun/db/base.py CHANGED
@@ -158,6 +158,16 @@ class RunDBInterface(ABC):
         tree: Optional[str] = None,
         format_: mlrun.common.formatters.ArtifactFormat = mlrun.common.formatters.ArtifactFormat.full,
         limit: Optional[int] = None,
+        partition_by: Optional[
+            Union[mlrun.common.schemas.ArtifactPartitionByField, str]
+        ] = None,
+        rows_per_partition: int = 1,
+        partition_sort_by: Optional[
+            Union[mlrun.common.schemas.SortField, str]
+        ] = mlrun.common.schemas.SortField.updated,
+        partition_order: Union[
+            mlrun.common.schemas.OrderType, str
+        ] = mlrun.common.schemas.OrderType.desc,
     ):
         pass
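
A hedged example of calling the extended interface, assuming a DB implementation that honors these parameters; the project name is a placeholder.

    import mlrun
    import mlrun.common.schemas as schemas

    db = mlrun.get_run_db()
    # Return only the most recently updated artifact per (project, name) pair
    latest = db.list_artifacts(
        project="doc-demo",  # hypothetical
        partition_by=schemas.ArtifactPartitionByField.project_and_name,
        rows_per_partition=1,
        partition_sort_by=schemas.SortField.updated,
        partition_order=schemas.OrderType.desc,
    )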