cognite-toolkit 0.6.105__py3-none-any.whl → 0.6.107__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24)
  1. cognite_toolkit/_cdf_tk/apps/_migrate_app.py +147 -0
  2. cognite_toolkit/_cdf_tk/commands/_migrate/conversion.py +6 -3
  3. cognite_toolkit/_cdf_tk/commands/_migrate/data_classes.py +17 -1
  4. cognite_toolkit/_cdf_tk/commands/_migrate/data_mapper.py +11 -8
  5. cognite_toolkit/_cdf_tk/commands/_migrate/default_mappings.py +5 -5
  6. cognite_toolkit/_cdf_tk/commands/_migrate/migration_io.py +44 -4
  7. cognite_toolkit/_cdf_tk/commands/_upload.py +39 -44
  8. cognite_toolkit/_cdf_tk/cruds/_resource_cruds/datamodel.py +6 -4
  9. cognite_toolkit/_cdf_tk/storageio/_annotations.py +16 -14
  10. cognite_toolkit/_cdf_tk/storageio/_base.py +3 -4
  11. cognite_toolkit/_cdf_tk/storageio/_datapoints.py +3 -4
  12. cognite_toolkit/_cdf_tk/utils/fileio/__init__.py +2 -0
  13. cognite_toolkit/_cdf_tk/utils/fileio/_base.py +5 -1
  14. cognite_toolkit/_cdf_tk/utils/fileio/_readers.py +61 -18
  15. cognite_toolkit/_cdf_tk/utils/fileio/_writers.py +14 -14
  16. cognite_toolkit/_repo_files/GitHub/.github/workflows/deploy.yaml +1 -1
  17. cognite_toolkit/_repo_files/GitHub/.github/workflows/dry-run.yaml +1 -1
  18. cognite_toolkit/_resources/cdf.toml +1 -1
  19. cognite_toolkit/_version.py +1 -1
  20. {cognite_toolkit-0.6.105.dist-info → cognite_toolkit-0.6.107.dist-info}/METADATA +1 -1
  21. {cognite_toolkit-0.6.105.dist-info → cognite_toolkit-0.6.107.dist-info}/RECORD +24 -24
  22. {cognite_toolkit-0.6.105.dist-info → cognite_toolkit-0.6.107.dist-info}/WHEEL +0 -0
  23. {cognite_toolkit-0.6.105.dist-info → cognite_toolkit-0.6.107.dist-info}/entry_points.txt +0 -0
  24. {cognite_toolkit-0.6.105.dist-info → cognite_toolkit-0.6.107.dist-info}/licenses/LICENSE +0 -0
cognite_toolkit/_cdf_tk/apps/_migrate_app.py
@@ -4,6 +4,7 @@ from typing import Annotated, Any
 
 import questionary
 import typer
+from cognite.client.data_classes import Annotation
 from cognite.client.data_classes.data_modeling import ContainerId
 
 from cognite_toolkit._cdf_tk.client import ToolkitClient
@@ -19,6 +20,7 @@ from cognite_toolkit._cdf_tk.commands._migrate.creators import (
 )
 from cognite_toolkit._cdf_tk.commands._migrate.data_mapper import AssetCentricMapper
 from cognite_toolkit._cdf_tk.commands._migrate.migration_io import (
+    AnnotationMigrationIO,
     AssetCentricMigrationIO,
 )
 from cognite_toolkit._cdf_tk.commands._migrate.selectors import (
@@ -31,6 +33,7 @@ from cognite_toolkit._cdf_tk.utils.cli_args import parse_view_str
 from cognite_toolkit._cdf_tk.utils.interactive_select import (
     AssetInteractiveSelect,
     DataModelingSelect,
+    FileMetadataInteractiveSelect,
     ResourceViewMappingInteractiveSelect,
 )
 from cognite_toolkit._cdf_tk.utils.useful_types import AssetCentricKind
@@ -49,6 +52,7 @@ class MigrateApp(typer.Typer):
         self.command("events")(self.events)
         self.command("timeseries")(self.timeseries)
         self.command("files")(self.files)
+        self.command("annotations")(self.annotations)
         self.command("canvas")(self.canvas)
         # Uncomment when infield v2 config migration is ready
         # self.command("infield-configs")(self.infield_configs)
@@ -694,6 +698,149 @@ class MigrateApp(typer.Typer):
             )
         )
 
+    @classmethod
+    def annotations(
+        cls,
+        ctx: typer.Context,
+        mapping_file: Annotated[
+            Path | None,
+            typer.Option(
+                "--mapping-file",
+                "-m",
+                help="Path to the mapping file that contains the mapping from Annotations to CogniteDiagramAnnotation. "
+                "This file is expected to have the following columns: [id, space, externalId, ingestionView].",
+            ),
+        ] = None,
+        data_set_id: Annotated[
+            str | None,
+            typer.Option(
+                "--data-set-id",
+                "-s",
+                help="The data set ID to select for the annotations to migrate. If not provided and the mapping file is not provided, "
+                "an interactive selection will be performed to select the data set to migrate annotations from.",
+            ),
+        ] = None,
+        instance_space: Annotated[
+            str | None,
+            typer.Option(
+                "--instance-space",
+                "-i",
+                help="The instance space to use for the migrated annotations. Required when using --data-set-id.",
+            ),
+        ] = None,
+        asset_annotation_mapping: Annotated[
+            str | None,
+            typer.Option(
+                "--asset-annotation-mapping",
+                "-a",
+                help="The ingestion mapping to use for asset-linked annotations. If not provided, "
+                "the default mapping (cdf_asset_annotations_mapping) will be used.",
+            ),
+        ] = None,
+        file_annotation_mapping: Annotated[
+            str | None,
+            typer.Option(
+                "--file-annotation-mapping",
+                "-f",
+                help="The ingestion mapping to use for file-linked annotations. If not provided, "
+                "the default mapping (cdf_file_annotations_mapping) will be used.",
+            ),
+        ] = None,
+        log_dir: Annotated[
+            Path,
+            typer.Option(
+                "--log-dir",
+                "-l",
+                help="Path to the directory where logs will be stored. If the directory does not exist, it will be created.",
+            ),
+        ] = Path(f"migration_logs_{TODAY!s}"),
+        dry_run: Annotated[
+            bool,
+            typer.Option(
+                "--dry-run",
+                "-d",
+                help="If set, the migration will not be executed, but only a report of what would be done is printed.",
+            ),
+        ] = False,
+        verbose: Annotated[
+            bool,
+            typer.Option(
+                "--verbose",
+                "-v",
+                help="Turn on to get more verbose output when running the command",
+            ),
+        ] = False,
+    ) -> None:
+        """Migrate Annotations to CogniteDiagramAnnotation edges in data modeling.
+
+        Annotations are diagram annotations that link assets or files to other resources. This command
+        migrates them to edges in the data modeling space, preserving the relationships and metadata.
+        """
+        client = EnvironmentVariables.create_from_environment().get_client()
+
+        if data_set_id is not None and mapping_file is not None:
+            raise typer.BadParameter("Cannot specify both data_set_id and mapping_file")
+        elif mapping_file is not None:
+            selected: AssetCentricMigrationSelector = MigrationCSVFileSelector(
+                datafile=mapping_file, kind="Annotations"
+            )
+            annotation_io = AnnotationMigrationIO(client)
+        elif data_set_id is not None:
+            if instance_space is None:
+                raise typer.BadParameter("--instance-space is required when using --data-set-id")
+            selected = MigrateDataSetSelector(data_set_external_id=data_set_id, kind="Annotations")
+            annotation_io = AnnotationMigrationIO(
+                client,
+                instance_space=instance_space,
+                default_asset_annotation_mapping=asset_annotation_mapping,
+                default_file_annotation_mapping=file_annotation_mapping,
+            )
+        else:
+            # Interactive selection
+            selector = FileMetadataInteractiveSelect(client, "migrate")
+            selected_data_set_id = selector.select_data_set(allow_empty=False)
+            dm_selector = DataModelingSelect(client, "migrate")
+            selected_instance_space = dm_selector.select_instance_space(
+                multiselect=False,
+                message="In which instance space do you want to create the annotations?",
+                include_empty=True,
+            )
+            if selected_instance_space is None:
+                raise typer.Abort()
+            asset_annotations_selector = ResourceViewMappingInteractiveSelect(client, "migrate asset annotations")
+            asset_annotation_mapping = asset_annotations_selector.select_resource_view_mapping(
+                resource_type="assetAnnotation",
+            ).external_id
+            file_annotations_selector = ResourceViewMappingInteractiveSelect(client, "migrate file annotations")
+            file_annotation_mapping = file_annotations_selector.select_resource_view_mapping(
+                resource_type="fileAnnotation",
+            ).external_id
+
+            selected = MigrateDataSetSelector(data_set_external_id=selected_data_set_id, kind="Annotations")
+            annotation_io = AnnotationMigrationIO(
+                client,
+                instance_space=selected_instance_space,
+                default_asset_annotation_mapping=asset_annotation_mapping,
+                default_file_annotation_mapping=file_annotation_mapping,
+            )
+
+            dry_run = questionary.confirm("Do you want to perform a dry run?", default=dry_run).ask()
+            verbose = questionary.confirm("Do you want verbose output?", default=verbose).ask()
+            if any(res is None for res in [dry_run, verbose]):
+                raise typer.Abort()
+
+        cmd = MigrationCommand()
+        cmd.run(
+            lambda: cmd.migrate(
+                selected=selected,
+                data=annotation_io,
+                mapper=AssetCentricMapper[Annotation](client),
+                log_dir=log_dir,
+                dry_run=dry_run,
+                verbose=verbose,
+            )
+        )
+
     @staticmethod
    def canvas(
        ctx: typer.Context,
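
The new command accepts three mutually exclusive input modes: an explicit CSV mapping file, a data set external ID paired with a target instance space, or fully interactive selection. A standalone sketch of that precedence logic (`resolve_mode` and the returned labels are illustrative, not part of the package):

```python
from pathlib import Path


def resolve_mode(mapping_file: Path | None, data_set_id: str | None, instance_space: str | None) -> str:
    # Mirrors the branch order in MigrateApp.annotations above.
    if data_set_id is not None and mapping_file is not None:
        raise ValueError("Cannot specify both data_set_id and mapping_file")
    if mapping_file is not None:
        return "csv"  # EdgeIds and ingestion views come from the CSV columns
    if data_set_id is not None:
        if instance_space is None:
            raise ValueError("--instance-space is required when using --data-set-id")
        return "data-set"  # stream all annotations in the data set
    return "interactive"  # prompt for data set, instance space, and mappings


assert resolve_mode(Path("mappings.csv"), None, None) == "csv"
assert resolve_mode(None, "my-data-set", "my-space") == "data-set"
assert resolve_mode(None, None, None) == "interactive"
```
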
cognite_toolkit/_cdf_tk/commands/_migrate/conversion.py
@@ -15,7 +15,10 @@ from cognite.client.data_classes.data_modeling.views import ViewProperty
 from cognite.client.utils._identifier import InstanceId
 
 from cognite_toolkit._cdf_tk.client import ToolkitClient
-from cognite_toolkit._cdf_tk.client.data_classes.migration import AssetCentricId, ResourceViewMapping
+from cognite_toolkit._cdf_tk.client.data_classes.migration import (
+    AssetCentricId,
+    ResourceViewMappingApply,
+)
 from cognite_toolkit._cdf_tk.utils.collection import flatten_dict_json_path
 from cognite_toolkit._cdf_tk.utils.dtype_conversion import (
     asset_centric_convert_to_primary_property,
@@ -163,7 +166,7 @@ class DirectRelationCache:
 def asset_centric_to_dm(
     resource: AssetCentricResourceExtended,
     instance_id: InstanceId,
-    view_source: ResourceViewMapping,
+    view_source: ResourceViewMappingApply,
     view_properties: dict[str, ViewProperty],
     direct_relation_cache: DirectRelationCache,
 ) -> tuple[NodeApply | EdgeApply | None, ConversionIssue]:
@@ -172,7 +175,7 @@ def asset_centric_to_dm(
     Args:
         resource (CogniteResource): The asset-centric resource to convert.
         instance_id (NodeId | EdgeApply): The ID of the instance to create or update.
-        view_source (ResourceViewMapping): The view source defining how to map the resource to the data model.
+        view_source (ResourceViewMappingApply): The view source defining how to map the resource to the data model.
         view_properties (dict[str, ViewProperty]): The defined properties referenced in the view source mapping.
         direct_relation_cache (DirectRelationCache): Cache for direct relation references.
 
cognite_toolkit/_cdf_tk/commands/_migrate/data_classes.py
@@ -14,7 +14,11 @@ from pydantic import BaseModel, BeforeValidator, field_validator, model_validato
 from cognite_toolkit._cdf_tk.client.data_classes.instances import InstanceApplyList
 from cognite_toolkit._cdf_tk.client.data_classes.migration import AssetCentricId
 from cognite_toolkit._cdf_tk.client.data_classes.pending_instances_ids import PendingInstanceId
-from cognite_toolkit._cdf_tk.commands._migrate.default_mappings import create_default_mappings
+from cognite_toolkit._cdf_tk.commands._migrate.default_mappings import (
+    ASSET_ANNOTATIONS_ID,
+    FILE_ANNOTATIONS_ID,
+    create_default_mappings,
+)
 from cognite_toolkit._cdf_tk.exceptions import ToolkitValueError
 from cognite_toolkit._cdf_tk.storageio._data_classes import ModelList
 from cognite_toolkit._cdf_tk.utils.useful_types import (
@@ -182,6 +186,18 @@ class FileMapping(MigrationMapping):
 class AnnotationMapping(MigrationMapping):
     resource_type: Literal["annotation"] = "annotation"
     instance_id: EdgeId
+    annotation_type: Literal["diagrams.AssetLink", "diagrams.FileLink"] | None = None
+
+    def get_ingestion_view(self) -> str:
+        """Get the ingestion view for the mapping. If not specified, return the default ingestion view."""
+        if self.ingestion_view:
+            return self.ingestion_view
+        elif self.annotation_type == "diagrams.AssetLink":
+            return ASSET_ANNOTATIONS_ID
+        elif self.annotation_type == "diagrams.FileLink":
+            return FILE_ANNOTATIONS_ID
+        else:
+            raise ToolkitValueError("Cannot determine default ingestion view for annotation without annotation_type")
 
     @field_validator("instance_id", mode="before")
     def _validate_instance_id(cls, v: Any) -> Any:
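
The fallback order in `get_ingestion_view` is: explicit `ingestion_view` first, then a default chosen by `annotation_type`. A minimal standalone sketch of that resolution (`pick_view` is illustrative, not part of the package, and the default ID values are assumed to match the `cdf_asset_annotations_mapping`/`cdf_file_annotations_mapping` names given in the CLI help):

```python
ASSET_ANNOTATIONS_ID = "cdf_asset_annotations_mapping"  # assumed value, for illustration
FILE_ANNOTATIONS_ID = "cdf_file_annotations_mapping"    # assumed value, for illustration


def pick_view(ingestion_view: str | None, annotation_type: str | None) -> str:
    # An explicit view always wins; otherwise the annotation type selects a default.
    if ingestion_view:
        return ingestion_view
    if annotation_type == "diagrams.AssetLink":
        return ASSET_ANNOTATIONS_ID
    if annotation_type == "diagrams.FileLink":
        return FILE_ANNOTATIONS_ID
    raise ValueError("Cannot determine default ingestion view without annotation_type")


assert pick_view("my_custom_mapping", "diagrams.AssetLink") == "my_custom_mapping"
assert pick_view(None, "diagrams.FileLink") == FILE_ANNOTATIONS_ID
```
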
cognite_toolkit/_cdf_tk/commands/_migrate/data_mapper.py
@@ -14,9 +14,10 @@ from cognite.client.data_classes.data_modeling import (
 )
 
 from cognite_toolkit._cdf_tk.client import ToolkitClient
-from cognite_toolkit._cdf_tk.client.data_classes.migration import ResourceViewMapping
+from cognite_toolkit._cdf_tk.client.data_classes.migration import ResourceViewMappingApply
 from cognite_toolkit._cdf_tk.commands._migrate.conversion import DirectRelationCache, asset_centric_to_dm
 from cognite_toolkit._cdf_tk.commands._migrate.data_classes import AssetCentricMapping
+from cognite_toolkit._cdf_tk.commands._migrate.default_mappings import create_default_mappings
 from cognite_toolkit._cdf_tk.commands._migrate.issues import ConversionIssue, MigrationIssue
 from cognite_toolkit._cdf_tk.commands._migrate.selectors import AssetCentricMigrationSelector
 from cognite_toolkit._cdf_tk.constants import MISSING_INSTANCE_SPACE
@@ -24,7 +25,7 @@ from cognite_toolkit._cdf_tk.exceptions import ToolkitValueError
 from cognite_toolkit._cdf_tk.storageio._base import T_Selector, T_WriteCogniteResource
 from cognite_toolkit._cdf_tk.utils import humanize_collection
 from cognite_toolkit._cdf_tk.utils.useful_types import (
-    T_AssetCentricResource,
+    T_AssetCentricResourceExtended,
 )
 
 
@@ -56,25 +57,27 @@ class DataMapper(Generic[T_Selector, T_CogniteResource, T_WriteCogniteResource],
 
 
 class AssetCentricMapper(
-    DataMapper[AssetCentricMigrationSelector, AssetCentricMapping[T_AssetCentricResource], InstanceApply]
+    DataMapper[AssetCentricMigrationSelector, AssetCentricMapping[T_AssetCentricResourceExtended], InstanceApply]
 ):
     def __init__(self, client: ToolkitClient) -> None:
         self.client = client
         self._ingestion_view_by_id: dict[ViewId, View] = {}
-        self._view_mapping_by_id: dict[str, ResourceViewMapping] = {}
+        self._view_mapping_by_id: dict[str, ResourceViewMappingApply] = {}
         self._direct_relation_cache = DirectRelationCache(client)
 
     def prepare(self, source_selector: AssetCentricMigrationSelector) -> None:
         ingestion_view_ids = source_selector.get_ingestion_mappings()
         ingestion_views = self.client.migration.resource_view_mapping.retrieve(ingestion_view_ids)
-        self._view_mapping_by_id = {view.external_id: view for view in ingestion_views}
+        defaults = {mapping.external_id: mapping for mapping in create_default_mappings()}
+        # Custom mappings from CDF override the default mappings
+        self._view_mapping_by_id = defaults | {view.external_id: view.as_write() for view in ingestion_views}
         missing_mappings = set(ingestion_view_ids) - set(self._view_mapping_by_id.keys())
         if missing_mappings:
             raise ToolkitValueError(
                 f"The following ingestion views were not found: {humanize_collection(missing_mappings)}"
             )
 
-        view_ids = list({view.view_id for view in ingestion_views})
+        view_ids = list({mapping.view_id for mapping in self._view_mapping_by_id.values()})
         views = self.client.data_modeling.views.retrieve(view_ids)
         self._ingestion_view_by_id = {view.as_id(): view for view in views}
         missing_views = set(view_ids) - set(self._ingestion_view_by_id.keys())
@@ -84,7 +87,7 @@ class AssetCentricMapper(
             )
 
     def map(
-        self, source: Sequence[AssetCentricMapping[T_AssetCentricResource]]
+        self, source: Sequence[AssetCentricMapping[T_AssetCentricResourceExtended]]
     ) -> Sequence[tuple[InstanceApply | None, ConversionIssue]]:
         """Map a chunk of asset-centric data to InstanceApplyList format."""
         # We update the direct relation cache in bulk for all resources in the chunk.
@@ -96,7 +99,7 @@ class AssetCentricMapper(
         return output
 
     def _map_single_item(
-        self, item: AssetCentricMapping[T_AssetCentricResource]
+        self, item: AssetCentricMapping[T_AssetCentricResourceExtended]
     ) -> tuple[NodeApply | EdgeApply | None, ConversionIssue]:
         mapping = item.mapping
         ingestion_view = mapping.get_ingestion_view()
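
The merge in `prepare()` relies on Python's dict-union precedence: the right-hand operand wins, so a mapping retrieved from CDF replaces the built-in default with the same external ID. A tiny illustration with stand-in values (the real dicts hold ResourceViewMappingApply objects):

```python
defaults = {"cdf_asset_annotations_mapping": "built-in", "my_mapping": "built-in"}
from_cdf = {"my_mapping": "from-cdf"}

merged = defaults | from_cdf
assert merged["my_mapping"] == "from-cdf"                     # CDF mapping overrides the default
assert merged["cdf_asset_annotations_mapping"] == "built-in"  # untouched default is kept
```
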
cognite_toolkit/_cdf_tk/commands/_migrate/default_mappings.py
@@ -93,7 +93,7 @@ def create_default_mappings() -> list[ResourceViewMappingApply]:
             view_id=ViewId("cdf_cdm", "CogniteDiagramAnnotation", "v1"),
             property_mapping={
                 # We are ignoring the symbol region in the default mapping.
-                "annotatedResource.id": "edge.startNode",
+                "annotatedResourceId": "edge.startNode",
                 "annotationType": "edge.type.externalId",
                 "creatingUser": "sourceCreatedUser",
                 "creatingApp": "sourceId",
@@ -114,16 +114,16 @@ def create_default_mappings() -> list[ResourceViewMappingApply]:
         ResourceViewMappingApply(
             external_id=FILE_ANNOTATIONS_ID,
             resource_type="fileAnnotation",
-            view_id=ViewId("cdf_cdm", "CogniteFileAnnotation", "v1"),
+            view_id=ViewId("cdf_cdm", "CogniteDiagramAnnotation", "v1"),
             property_mapping={
-                "annotatedResource.id": "edge.startNode",
+                "annotatedResourceId": "edge.startNode",
                 "annotationType": "edge.type.externalId",
                 "creatingUser": "sourceCreatedUser",
                 "creatingApp": "sourceId",
                 "creatingAppVersion": "sourceContext",
                 "status": "status",
-                "data.fileRef.id": "edge.startNode",
-                "data.fileRef.externalId": "edge.startNode",
+                "data.fileRef.id": "edge.endNode",
+                "data.fileRef.externalId": "edge.endNode",
                 "data.description": "description",
                 "data.pageNumber": "startNodePageNumber",
                 "data.textRegion.confidence": "confidence",
cognite_toolkit/_cdf_tk/commands/_migrate/migration_io.py
@@ -33,6 +33,7 @@ from .data_classes import (
     MigrationMappingList,
 )
 from .data_model import INSTANCE_SOURCE_VIEW_ID
+from .default_mappings import ASSET_ANNOTATIONS_ID, FILE_ANNOTATIONS_ID
 from .selectors import AssetCentricMigrationSelector, MigrateDataSetSelector, MigrationCSVFileSelector
 
 
@@ -213,6 +214,16 @@ class AssetCentricMigrationIO(
 class AnnotationMigrationIO(
     UploadableStorageIO[AssetCentricMigrationSelector, AssetCentricMapping[Annotation], InstanceApply]
 ):
+    """IO class for migrating Annotations.
+
+    Args:
+        client: The ToolkitClient to use for CDF interactions.
+        instance_space: The instance space to use for the migrated annotations.
+        default_asset_annotation_mapping: The default ingestion mapping to use for asset-linked annotations.
+        default_file_annotation_mapping: The default ingestion mapping to use for file-linked annotations.
+
+    """
+
     KIND = "AnnotationMigration"
     SUPPORTED_DOWNLOAD_FORMATS = frozenset({".parquet", ".csv", ".ndjson"})
     SUPPORTED_COMPRESSIONS = frozenset({".gz"})
@@ -220,17 +231,28 @@ class AnnotationMigrationIO(
     CHUNK_SIZE = 1000
     UPLOAD_ENDPOINT = InstanceIO.UPLOAD_ENDPOINT
 
-    def __init__(self, client: ToolkitClient, instance_space: str | None = None) -> None:
+    def __init__(
+        self,
+        client: ToolkitClient,
+        instance_space: str | None = None,
+        default_asset_annotation_mapping: str | None = None,
+        default_file_annotation_mapping: str | None = None,
+    ) -> None:
         super().__init__(client)
         self.annotation_io = AnnotationIO(client)
         self.instance_space = instance_space
+        self.default_asset_annotation_mapping = default_asset_annotation_mapping or ASSET_ANNOTATIONS_ID
+        self.default_file_annotation_mapping = default_file_annotation_mapping or FILE_ANNOTATIONS_ID
 
     def as_id(self, item: AssetCentricMapping[Annotation]) -> str:
         return f"Annotation_{item.mapping.id}"
 
     def count(self, selector: AssetCentricMigrationSelector) -> int | None:
-        # There is no efficient way to count annotations in CDF.
-        return None
+        if isinstance(selector, MigrationCSVFileSelector):
+            return len(selector.items)
+        else:
+            # There is no efficient way to count annotations in CDF.
+            return None
 
     def stream_data(self, selector: AssetCentricMigrationSelector, limit: int | None = None) -> Iterable[Page]:
         if isinstance(selector, MigrateDataSetSelector):
@@ -253,8 +275,10 @@ class AnnotationMigrationIO(
             mapping = AnnotationMapping(
                 instance_id=EdgeId(space=self.instance_space, external_id=f"annotation_{resource.id!r}"),
                 id=resource.id,
-                ingestion_view=selector.ingestion_mapping,
+                ingestion_view=self._get_mapping(selector.ingestion_mapping, resource),
                 preferred_consumer_view=selector.preferred_consumer_view,
+                # The PySDK is poorly typed.
+                annotation_type=resource.annotation_type,  # type: ignore[arg-type]
             )
             mapping_list.append(AssetCentricMapping(mapping=mapping, resource=resource))
         yield mapping_list
@@ -275,6 +299,7 @@ class AnnotationMigrationIO(
            if resource is None:
                not_found += 1
                continue
+            mapping.ingestion_view = self._get_mapping(mapping.ingestion_view, resource)
            chunk.append(AssetCentricMapping(mapping=mapping, resource=resource))
        if chunk:
            yield chunk
@@ -284,6 +309,21 @@ class AnnotationMigrationIO(
                f"Could not find {not_found} annotations referenced in the CSV file. They will be skipped during migration."
            ).print_warning(include_timestamp=True, console=self.client.console)
 
+    def _get_mapping(self, current_mapping: str | None, resource: Annotation) -> str:
+        try:
+            return (
+                current_mapping
+                or {
+                    "diagrams.AssetLink": self.default_asset_annotation_mapping,
+                    "diagrams.FileLink": self.default_file_annotation_mapping,
+                }[resource.annotation_type]
+            )
+        except KeyError as e:
+            raise ToolkitValueError(
+                f"Could not determine default ingestion view for annotation type '{resource.annotation_type}'. "
+                "Please specify the ingestion view explicitly in the CSV file."
+            ) from e
+
     def json_to_resource(self, item_json: dict[str, JsonVal]) -> InstanceApply:
         raise NotImplementedError("Deserializing Annotation Migrations from JSON is not supported.")
 
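_get_mapping keeps an explicitly supplied view and only falls back to the type-based default; unknown annotation types surface as a ToolkitValueError. A condensed sketch of the `or`-plus-lookup idiom, using the default mapping IDs named in the CLI help (the "vision.Tag" type is a hypothetical unsupported value):

```python
defaults = {
    "diagrams.AssetLink": "cdf_asset_annotations_mapping",
    "diagrams.FileLink": "cdf_file_annotations_mapping",
}

assert ("my_view" or defaults["diagrams.AssetLink"]) == "my_view"  # explicit view wins
assert (None or defaults["diagrams.FileLink"]) == "cdf_file_annotations_mapping"
try:
    _ = None or defaults["vision.Tag"]  # unsupported type
except KeyError:
    pass  # the real code re-raises this as ToolkitValueError
```
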
cognite_toolkit/_cdf_tk/commands/_upload.py
@@ -27,7 +27,7 @@ from cognite_toolkit._cdf_tk.tk_warnings import HighSeverityWarning, MediumSever
 from cognite_toolkit._cdf_tk.tk_warnings.fileread import ResourceFormatWarning
 from cognite_toolkit._cdf_tk.utils.auth import EnvironmentVariables
 from cognite_toolkit._cdf_tk.utils.file import read_yaml_file
-from cognite_toolkit._cdf_tk.utils.fileio import TABLE_READ_CLS_BY_FORMAT, FileReader
+from cognite_toolkit._cdf_tk.utils.fileio import MultiFileReader
 from cognite_toolkit._cdf_tk.utils.http_client import HTTPClient, ItemMessage, SuccessResponseItems
 from cognite_toolkit._cdf_tk.utils.producer_worker import ProducerWorkerExecutor
 from cognite_toolkit._cdf_tk.utils.progress_tracker import ProgressTracker
@@ -87,7 +87,7 @@ class UploadCommand(ToolkitCommand):
         ├── datafile2.Manifest.yaml # Manifest file for datafile2
         └── ...
         """
-        console = Console()
+        console = client.console
         data_files_by_selector = self._find_data_files(input_dir, kind)
 
         self._deploy_resource_folder(input_dir / DATA_RESOURCE_DIR, deploy_resources, client, console, dry_run, verbose)
@@ -225,49 +225,44 @@ class UploadCommand(ToolkitCommand):
             io = self._create_selected_io(selector, datafiles[0], client)
             if io is None:
                 continue
-            for data_file in datafiles:
-                file_display = self._path_as_display_name(data_file)
-                if verbose:
-                    console.print(f"{action} {selector.display_name} from {file_display.as_posix()!r}")
-                reader = FileReader.from_filepath(data_file)
-                is_table = reader.format in TABLE_READ_CLS_BY_FORMAT
-                if is_table and not isinstance(io, TableUploadableStorageIO):
-                    raise ToolkitValueError(f"{selector.display_name} does not support {reader.format!r} files.")
-                tracker = ProgressTracker[str]([self._UPLOAD])
-                executor = ProducerWorkerExecutor[list[tuple[str, dict[str, JsonVal]]], Sequence[UploadItem]](
-                    download_iterable=io.read_chunks(reader),
-                    process=partial(io.rows_to_data, selector=selector)
-                    if is_table and isinstance(io, TableUploadableStorageIO)
-                    else io.json_chunk_to_data,
-                    write=partial(
-                        self._upload_items,
-                        upload_client=upload_client,
-                        io=io,
-                        dry_run=dry_run,
-                        selector=selector,
-                        tracker=tracker,
-                        console=console,
-                    ),
-                    iteration_count=None,
-                    max_queue_size=self._MAX_QUEUE_SIZE,
-                    download_description=f"Reading {file_count:,}/{total_file_count + 1:,}: {file_display.as_posix()!s}",
-                    process_description="Processing",
-                    write_description=f"{action} {selector.display_name!r}",
+            reader = MultiFileReader(datafiles)
+            if reader.is_table and not isinstance(io, TableUploadableStorageIO):
+                raise ToolkitValueError(f"{selector.display_name} does not support {reader.format!r} files.")
+            tracker = ProgressTracker[str]([self._UPLOAD])
+            executor = ProducerWorkerExecutor[list[tuple[str, dict[str, JsonVal]]], Sequence[UploadItem]](
+                download_iterable=io.read_chunks(reader),
+                process=partial(io.rows_to_data, selector=selector)
+                if reader.is_table and isinstance(io, TableUploadableStorageIO)
+                else io.json_chunk_to_data,
+                write=partial(
+                    self._upload_items,
+                    upload_client=upload_client,
+                    io=io,
+                    dry_run=dry_run,
+                    selector=selector,
+                    tracker=tracker,
                     console=console,
-                )
-                executor.run()
-                file_count += 1
-                executor.raise_on_error()
-                final_action = "Uploaded" if not dry_run else "Would upload"
-                suffix = " successfully" if not dry_run else ""
-                results = tracker.aggregate()
-                success = results.get((self._UPLOAD, "success"), 0)
-                failed = results.get((self._UPLOAD, "failed"), 0)
-                if failed > 0:
-                    suffix += f", {failed:,} failed"
-                console.print(
-                    f"{final_action} {success:,} {selector.display_name} from {file_display.as_posix()!r}{suffix}."
-                )
+                ),
+                iteration_count=None,
+                max_queue_size=self._MAX_QUEUE_SIZE,
+                download_description=f"Reading {selector.display_name!r} files",
+                process_description="Processing",
+                write_description=f"{action} {selector.display_name!r}",
+                console=console,
+            )
+            executor.run()
+            file_count += len(datafiles)
+            executor.raise_on_error()
+            final_action = "Uploaded" if not dry_run else "Would upload"
+            suffix = " successfully" if not dry_run else ""
+            results = tracker.aggregate()
+            success = results.get((self._UPLOAD, "success"), 0)
+            failed = results.get((self._UPLOAD, "failed"), 0)
+            if failed > 0:
+                suffix += f", {failed:,} failed"
+            console.print(
+                f"{final_action} {success:,} {selector.display_name} from {len(datafiles)} files{suffix}."
+            )
 
     @staticmethod
     def _path_as_display_name(input_path: Path, cwd: Path = Path.cwd()) -> Path:
cognite_toolkit/_cdf_tk/cruds/_resource_cruds/datamodel.py
@@ -328,6 +328,7 @@ class ContainerCRUD(ResourceContainerCRUD[ContainerId, ContainerApply, Container
 
     def dump_resource(self, resource: Container, local: dict[str, Any] | None = None) -> dict[str, Any]:
         dumped = resource.as_write().dump()
+        has_local = local is not None
         local = local or {}
         for key in ["constraints", "indexes"]:
             if not dumped.get(key) and key not in local:
@@ -339,15 +340,16 @@ class ContainerCRUD(ResourceContainerCRUD[ContainerId, ContainerApply, Container
                 continue
             local_prop = local_prop_by_id[prop_id]
             for key, default in [("immutable", False), ("autoIncrement", False), ("nullable", True)]:
-                if cdf_prop.get(key) is default and key not in local_prop:
+                if has_local and cdf_prop.get(key) is default and key not in local_prop:
                     cdf_prop.pop(key, None)
             cdf_type = cdf_prop.get("type", {})
             local_type = local_prop.get("type", {})
             for key, type_default in [("list", False), ("collation", "ucs_basic")]:
-                if cdf_type.get(key) == type_default and key not in local_type:
+                if has_local and cdf_type.get(key) == type_default and key not in local_type:
                     cdf_type.pop(key, None)
-        if "usedFor" not in local:
-            dumped.pop("usedFor", None)
+        if has_local and "usedFor" not in local and dumped.get("usedFor") == "node":
+            # Only drop if set to default by server.
+            dumped.pop("usedFor", None)
         return dumped
 
     def create(self, items: Sequence[ContainerApply]) -> ContainerList:
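
The `has_local` flag changes when server-side defaults are stripped from the dump: they are now only dropped when there is a local definition to compare against. A hedged sketch of just the usedFor rule, with illustrative values:

```python
def drop_used_for(dumped: dict, local: dict | None) -> dict:
    # Mirrors the usedFor rule above; simplified for illustration.
    has_local = local is not None
    local = local or {}
    if has_local and "usedFor" not in local and dumped.get("usedFor") == "node":
        dumped.pop("usedFor", None)  # only the server-set default is dropped
    return dumped


assert "usedFor" in drop_used_for({"usedFor": "node"}, None)    # no local file: keep
assert "usedFor" not in drop_used_for({"usedFor": "node"}, {})  # local omits it: drop default
assert "usedFor" in drop_used_for({"usedFor": "edge"}, {})      # non-default value: always keep
```
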
cognite_toolkit/_cdf_tk/storageio/_annotations.py
@@ -26,21 +26,23 @@ class AnnotationIO(StorageIO[AssetCentricSelector, Annotation]):
     def stream_data(self, selector: AssetCentricSelector, limit: int | None = None) -> Iterable[Page[Annotation]]:
         total = 0
         for file_chunk in FileMetadataIO(self.client).stream_data(selector, None):
-            # Todo Support pagination. This is missing in the SDK.
-            results = self.client.annotations.list(
-                filter=AnnotationFilter(
-                    annotated_resource_type="file",
-                    annotated_resource_ids=[{"id": file_metadata.id} for file_metadata in file_chunk.items],
+            for annotation_type in ["diagrams.AssetLink", "diagrams.FileLink"]:
+                # Todo Support pagination. This is missing in the SDK.
+                results = self.client.annotations.list(
+                    filter=AnnotationFilter(
+                        annotated_resource_type="file",
+                        annotated_resource_ids=[{"id": file_metadata.id} for file_metadata in file_chunk.items],
+                        annotation_type=annotation_type,
+                    )
                 )
-            )
-            if limit is not None and total + len(results) > limit:
-                results = results[: limit - total]
-
-            for chunk in chunker_sequence(results, self.CHUNK_SIZE):
-                yield Page(worker_id="main", items=chunk)
-                total += len(chunk)
-                if limit is not None and total >= limit:
-                    break
+                if limit is not None and total + len(results) > limit:
+                    results = results[: limit - total]
+
+                for chunk in chunker_sequence(results, self.CHUNK_SIZE):
+                    yield Page(worker_id="main", items=chunk)
+                    total += len(chunk)
+                    if limit is not None and total >= limit:
+                        break
 
     def count(self, selector: AssetCentricSelector) -> int | None:
         """There is no efficient way to count annotations in CDF."""
8
8
  from cognite_toolkit._cdf_tk.client import ToolkitClient
9
9
  from cognite_toolkit._cdf_tk.exceptions import ToolkitNotImplementedError
10
10
  from cognite_toolkit._cdf_tk.utils.collection import chunker
11
- from cognite_toolkit._cdf_tk.utils.fileio import FileReader, SchemaColumn
12
- from cognite_toolkit._cdf_tk.utils.fileio._readers import TableReader
11
+ from cognite_toolkit._cdf_tk.utils.fileio import MultiFileReader, SchemaColumn
13
12
  from cognite_toolkit._cdf_tk.utils.http_client import HTTPClient, HTTPMessage, ItemsRequest
14
13
  from cognite_toolkit._cdf_tk.utils.useful_types import JsonVal, T_WriteCogniteResource
15
14
 
@@ -217,8 +216,8 @@ class UploadableStorageIO(
217
216
  raise NotImplementedError()
218
217
 
219
218
  @classmethod
220
- def read_chunks(cls, reader: FileReader) -> Iterable[list[tuple[str, dict[str, JsonVal]]]]:
221
- data_name = "row" if isinstance(reader, TableReader) else "line"
219
+ def read_chunks(cls, reader: MultiFileReader) -> Iterable[list[tuple[str, dict[str, JsonVal]]]]:
220
+ data_name = "row" if reader.is_table else "line"
222
221
  # Include name of line for better error messages
223
222
  iterable = ((f"{data_name} {line_no}", item) for line_no, item in reader.read_chunks_with_line_numbers())
224
223
 
cognite_toolkit/_cdf_tk/storageio/_datapoints.py
@@ -19,8 +19,7 @@ from cognite_toolkit._cdf_tk.utils.dtype_conversion import (
     _TextConverter,
     _ValueConverter,
 )
-from cognite_toolkit._cdf_tk.utils.fileio import FileReader
-from cognite_toolkit._cdf_tk.utils.fileio._readers import TableReader
+from cognite_toolkit._cdf_tk.utils.fileio._readers import MultiFileReader
 from cognite_toolkit._cdf_tk.utils.http_client import DataBodyRequest, HTTPClient, HTTPMessage
 from cognite_toolkit._cdf_tk.utils.useful_types import JsonVal
 
@@ -165,8 +164,8 @@ class DatapointsIO(TableUploadableStorageIO[DataPointsFileSelector, DataPointLis
     )
 
     @classmethod
-    def read_chunks(cls, reader: FileReader) -> Iterable[list[tuple[str, dict[str, JsonVal]]]]:
-        if not isinstance(reader, TableReader):
+    def read_chunks(cls, reader: MultiFileReader) -> Iterable[list[tuple[str, dict[str, JsonVal]]]]:
+        if not reader.is_table:
             raise RuntimeError("DatapointsIO can only read from TableReader instances.")
         iterator = iter(reader.read_chunks_with_line_numbers())
         try:
cognite_toolkit/_cdf_tk/utils/fileio/__init__.py
@@ -12,6 +12,7 @@ from ._readers import (
     CSVReader,
     FailedParsing,
     FileReader,
+    MultiFileReader,
     NDJsonReader,
     ParquetReader,
     YAMLReader,
@@ -45,6 +46,7 @@ __all__ = [
     "FileReader",
     "FileWriter",
     "GzipCompression",
+    "MultiFileReader",
     "NDJsonReader",
     "NDJsonWriter",
     "ParquetReader",
cognite_toolkit/_cdf_tk/utils/fileio/_base.py
@@ -14,7 +14,11 @@ T_IO = TypeVar("T_IO", bound=IO)
 
 
 class FileIO(ABC):
-    format: ClassVar[str]
+    FORMAT: ClassVar[str]
+
+    @property
+    def format(self) -> str:
+        return self.FORMAT
 
 
 @dataclass(frozen=True)
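
Renaming the class attribute to FORMAT frees the `format` name for an instance-level property, which subclasses such as the new MultiFileReader (below) can override to delegate to a wrapped reader class. A minimal sketch of the pattern, assuming only the base class shown above (`NDJsonIO` is a hypothetical subclass for illustration):

```python
from abc import ABC
from typing import ClassVar


class FileIO(ABC):
    FORMAT: ClassVar[str]  # declared once per concrete subclass

    @property
    def format(self) -> str:
        return self.FORMAT


class NDJsonIO(FileIO):  # hypothetical subclass for illustration
    FORMAT = ".ndjson"


assert NDJsonIO().format == ".ndjson"
```
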
cognite_toolkit/_cdf_tk/utils/fileio/_readers.py
@@ -1,5 +1,6 @@
 import csv
 import json
+import re
 from abc import ABC, abstractmethod
 from collections import Counter, defaultdict
 from collections.abc import Callable, Iterator, Mapping, Sequence
@@ -40,7 +41,7 @@ class FileReader(FileIO, ABC):
         raise NotImplementedError("This method should be implemented in subclasses.")
 
     @classmethod
-    def from_filepath(cls, filepath: Path) -> "FileReader":
+    def from_filepath(cls, filepath: Path) -> "type[FileReader]":
         if len(filepath.suffixes) == 0:
             raise ToolkitValueError(
                 f"File has no suffix. Available formats: {humanize_collection(FILE_READ_CLS_BY_FORMAT.keys())}."
@@ -55,15 +56,57 @@ class FileReader(FileIO, ABC):
             )
 
         if suffix in FILE_READ_CLS_BY_FORMAT:
-            return FILE_READ_CLS_BY_FORMAT[suffix](input_file=filepath)
+            return FILE_READ_CLS_BY_FORMAT[suffix]
 
         raise ToolkitValueError(
             f"Unknown file format: {suffix}. Available formats: {humanize_collection(FILE_READ_CLS_BY_FORMAT.keys())}."
         )
 
 
+class MultiFileReader(FileReader):
+    """Reads multiple files and yields chunks from each file sequentially.
+
+    Args:
+        input_files (Sequence[Path]): The list of file paths to read.
+    """
+
+    PART_PATTERN = re.compile(r"part-(\d{4})$")
+
+    def __init__(self, input_files: Sequence[Path]) -> None:
+        super().__init__(input_file=input_files[0])
+        self.input_files = input_files
+        reader_classes = Counter([FileReader.from_filepath(input_file) for input_file in self.input_files])
+        if len(reader_classes) > 1:
+            raise ToolkitValueError(
+                "All input files must be of the same format. "
+                f"Found formats: {humanize_collection([cls.FORMAT for cls in reader_classes.keys()])}."
+            )
+        self.reader_class = reader_classes.most_common(1)[0][0]
+
+    @property
+    def is_table(self) -> bool:
+        return issubclass(self.reader_class, TableReader)
+
+    @property
+    def format(self) -> str:
+        return self.reader_class.FORMAT
+
+    def read_chunks(self) -> Iterator[dict[str, JsonVal]]:
+        for input_file in sorted(self.input_files, key=self._part_no):
+            yield from self.reader_class(input_file).read_chunks()
+
+    def _part_no(self, path: Path) -> int:
+        match = self.PART_PATTERN.search(path.stem)
+        if match:
+            return int(match.group(1))
+        return 99999
+
+    def _read_chunks_from_file(self, file: TextIOWrapper) -> Iterator[dict[str, JsonVal]]:
+        raise NotImplementedError("This method is not used in MultiFileReader.")
+
+
 class NDJsonReader(FileReader):
-    format = ".ndjson"
+    FORMAT = ".ndjson"
 
     def _read_chunks_from_file(self, file: TextIOWrapper) -> Iterator[dict[str, JsonVal]]:
         for line in file:
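
A usage sketch for the new reader (file names are hypothetical and the files must exist on disk): all inputs must share one format, and parts are read in part-number order regardless of the order they are passed in.

```python
from pathlib import Path

from cognite_toolkit._cdf_tk.utils.fileio import MultiFileReader

# part files as produced by the Toolkit's chunked writers (names are illustrative)
reader = MultiFileReader([Path("rows.part-0001.csv"), Path("rows.part-0000.csv")])
assert reader.format == ".csv"
assert reader.is_table  # CSVReader is a TableReader
for row in reader.read_chunks():  # rows.part-0000.csv is exhausted first
    print(row)
```
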
@@ -77,11 +120,11 @@ class YAMLBaseReader(FileReader, ABC):
 
 
 class YAMLReader(YAMLBaseReader):
-    format = ".yaml"
+    FORMAT = ".yaml"
 
 
 class YMLReader(YAMLBaseReader):
-    format = ".yml"
+    FORMAT = ".yml"
 
 
 @dataclass
@@ -171,8 +214,8 @@ class TableReader(FileReader, ABC):
 
         if not input_file.exists():
             raise ToolkitFileNotFoundError(f"File not found: {input_file.as_posix()!r}.")
-        if input_file.suffix != cls.format:
-            raise ToolkitValueError(f"Expected a {cls.format} file got a {input_file.suffix!r} file instead.")
+        if input_file.suffix != cls.FORMAT:
+            raise ToolkitValueError(f"Expected a {cls.FORMAT} file got a {input_file.suffix!r} file instead.")
 
         column_names, sample_rows = cls._read_sample_rows(input_file, sniff_rows)
         cls._check_column_names(column_names)
@@ -213,7 +256,7 @@ class TableReader(FileReader, ABC):
 class CSVReader(TableReader):
     """Reads CSV files and yields each row as a dictionary."""
 
-    format = ".csv"
+    FORMAT = ".csv"
 
     def _read_chunks_from_file(self, file: TextIOWrapper) -> Iterator[dict[str, JsonVal]]:
         if self.keep_failed_cells and self.failed_cell:
@@ -257,7 +300,7 @@ class CSVReader(TableReader):
 
 
 class ParquetReader(TableReader):
-    format = ".parquet"
+    FORMAT = ".parquet"
 
     def __init__(self, input_file: Path) -> None:
         # Parquet files have their own schema, so we don't need to sniff or provide one.
@@ -312,19 +355,19 @@ class ParquetReader(TableReader):
 FILE_READ_CLS_BY_FORMAT: Mapping[str, type[FileReader]] = {}
 TABLE_READ_CLS_BY_FORMAT: Mapping[str, type[TableReader]] = {}
 for subclass in get_concrete_subclasses(FileReader):  # type: ignore[type-abstract]
-    if not getattr(subclass, "format", None):
+    if not getattr(subclass, "FORMAT", None):
         continue
-    if subclass.format in FILE_READ_CLS_BY_FORMAT:
+    if subclass.FORMAT in FILE_READ_CLS_BY_FORMAT:
         raise TypeError(
-            f"Duplicate file format {subclass.format!r} found for classes "
-            f"{FILE_READ_CLS_BY_FORMAT[subclass.format].__name__!r} and {subclass.__name__!r}."
+            f"Duplicate file format {subclass.FORMAT!r} found for classes "
+            f"{FILE_READ_CLS_BY_FORMAT[subclass.FORMAT].__name__!r} and {subclass.__name__!r}."
         )
     # We know we have a dict, but we want to expose FILE_READ_CLS_BY_FORMAT as a Mapping
-    FILE_READ_CLS_BY_FORMAT[subclass.format] = subclass  # type: ignore[index]
+    FILE_READ_CLS_BY_FORMAT[subclass.FORMAT] = subclass  # type: ignore[index]
     if issubclass(subclass, TableReader):
-        if subclass.format in TABLE_READ_CLS_BY_FORMAT:
+        if subclass.FORMAT in TABLE_READ_CLS_BY_FORMAT:
             raise TypeError(
-                f"Duplicate table file format {subclass.format!r} found for classes "
-                f"{TABLE_READ_CLS_BY_FORMAT[subclass.format].__name__!r} and {subclass.__name__!r}."
+                f"Duplicate table file format {subclass.FORMAT!r} found for classes "
+                f"{TABLE_READ_CLS_BY_FORMAT[subclass.FORMAT].__name__!r} and {subclass.__name__!r}."
             )
-        TABLE_READ_CLS_BY_FORMAT[subclass.format] = subclass  # type: ignore[index]
+        TABLE_READ_CLS_BY_FORMAT[subclass.FORMAT] = subclass  # type: ignore[index]
cognite_toolkit/_cdf_tk/utils/fileio/_writers.py
@@ -154,7 +154,7 @@ class TableWriter(FileWriter[T_IO], ABC):
 
 
 class NDJsonWriter(FileWriter[TextIOWrapper]):
-    format = ".ndjson"
+    FORMAT = ".ndjson"
 
     class _DateTimeEncoder(json.JSONEncoder):
         def default(self, obj: object) -> object:
@@ -181,15 +181,15 @@ class YAMLBaseWriter(FileWriter[TextIOWrapper], ABC):
 
 
 class YAMLWriter(YAMLBaseWriter):
-    format = ".yaml"
+    FORMAT = ".yaml"
 
 
 class YMLWriter(YAMLBaseWriter):
-    format = ".yml"
+    FORMAT = ".yml"
 
 
 class CSVWriter(TableWriter[TextIOWrapper]):
-    format = ".csv"
+    FORMAT = ".csv"
 
     def __init__(
         self,
@@ -241,7 +241,7 @@ class CSVWriter(TableWriter[TextIOWrapper]):
 
 
 class ParquetWriter(TableWriter["pq.ParquetWriter"]):
-    format = ".parquet"
+    FORMAT = ".parquet"
 
     def _create_writer(self, filepath: Path) -> "pq.ParquetWriter":
         import pyarrow.parquet as pq
@@ -411,19 +411,19 @@ class ParquetWriter(TableWriter["pq.ParquetWriter"]):
 FILE_WRITE_CLS_BY_FORMAT: Mapping[str, type[FileWriter]] = {}
 TABLE_WRITE_CLS_BY_FORMAT: Mapping[str, type[TableWriter]] = {}
 for subclass in get_concrete_subclasses(FileWriter):  # type: ignore[type-abstract]
-    if not getattr(subclass, "format", None):
+    if not getattr(subclass, "FORMAT", None):
         continue
-    if subclass.format in FILE_WRITE_CLS_BY_FORMAT:
+    if subclass.FORMAT in FILE_WRITE_CLS_BY_FORMAT:
         raise TypeError(
-            f"Duplicate file format {subclass.format!r} found for classes "
-            f"{FILE_WRITE_CLS_BY_FORMAT[subclass.format].__name__!r} and {subclass.__name__!r}."
+            f"Duplicate file format {subclass.FORMAT!r} found for classes "
+            f"{FILE_WRITE_CLS_BY_FORMAT[subclass.FORMAT].__name__!r} and {subclass.__name__!r}."
         )
     # We know we have a dict, but we want to expose FILE_WRITE_CLS_BY_FORMAT as a Mapping
-    FILE_WRITE_CLS_BY_FORMAT[subclass.format] = subclass  # type: ignore[index]
+    FILE_WRITE_CLS_BY_FORMAT[subclass.FORMAT] = subclass  # type: ignore[index]
     if issubclass(subclass, TableWriter):
-        if subclass.format in TABLE_WRITE_CLS_BY_FORMAT:
+        if subclass.FORMAT in TABLE_WRITE_CLS_BY_FORMAT:
             raise TypeError(
-                f"Duplicate table file format {subclass.format!r} found for classes "
-                f"{TABLE_WRITE_CLS_BY_FORMAT[subclass.format].__name__!r} and {subclass.__name__!r}."
+                f"Duplicate table file format {subclass.FORMAT!r} found for classes "
+                f"{TABLE_WRITE_CLS_BY_FORMAT[subclass.FORMAT].__name__!r} and {subclass.__name__!r}."
             )
-        TABLE_WRITE_CLS_BY_FORMAT[subclass.format] = subclass  # type: ignore[index]
+        TABLE_WRITE_CLS_BY_FORMAT[subclass.FORMAT] = subclass  # type: ignore[index]
@@ -12,7 +12,7 @@ jobs:
12
12
  environment: dev
13
13
  name: Deploy
14
14
  container:
15
- image: cognite/toolkit:0.6.105
15
+ image: cognite/toolkit:0.6.107
16
16
  env:
17
17
  CDF_CLUSTER: ${{ vars.CDF_CLUSTER }}
18
18
  CDF_PROJECT: ${{ vars.CDF_PROJECT }}
@@ -10,7 +10,7 @@ jobs:
10
10
  environment: dev
11
11
  name: Deploy Dry Run
12
12
  container:
13
- image: cognite/toolkit:0.6.105
13
+ image: cognite/toolkit:0.6.107
14
14
  env:
15
15
  CDF_CLUSTER: ${{ vars.CDF_CLUSTER }}
16
16
  CDF_PROJECT: ${{ vars.CDF_PROJECT }}
@@ -4,7 +4,7 @@ default_env = "<DEFAULT_ENV_PLACEHOLDER>"
4
4
  [modules]
5
5
  # This is the version of the modules. It should not be changed manually.
6
6
  # It will be updated by the 'cdf modules upgrade' command.
7
- version = "0.6.105"
7
+ version = "0.6.107"
8
8
 
9
9
  [alpha_flags]
10
10
  external-libraries = true
cognite_toolkit/_version.py
@@ -1 +1 @@
-__version__ = "0.6.105"
+__version__ = "0.6.107"
{cognite_toolkit-0.6.105.dist-info → cognite_toolkit-0.6.107.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cognite_toolkit
-Version: 0.6.105
+Version: 0.6.107
 Summary: Official Cognite Data Fusion tool for project templates and configuration deployment
 Project-URL: Homepage, https://docs.cognite.com/cdf/deploy/cdf_toolkit/
 Project-URL: Changelog, https://github.com/cognitedata/toolkit/releases
{cognite_toolkit-0.6.105.dist-info → cognite_toolkit-0.6.107.dist-info}/RECORD
@@ -1,6 +1,6 @@
 cognite_toolkit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 cognite_toolkit/_cdf.py,sha256=0abeQr1Tfk4lkGaoXyrnFC28wDSlR_8UGrh10noGduQ,6085
-cognite_toolkit/_version.py,sha256=WfvaY_npde01GAAtt1MaxhE137U1_NM3UTrLq2lyxhA,24
+cognite_toolkit/_version.py,sha256=lu-o5QGcXmrHU8tes3QjIxzRHr_63kClROn7Lrtlq6s,24
 cognite_toolkit/_cdf_tk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 cognite_toolkit/_cdf_tk/cdf_toml.py,sha256=VSWV9h44HusWIaKpWgjrOMrc3hDoPTTXBXlp6-NOrIM,9079
 cognite_toolkit/_cdf_tk/constants.py,sha256=Gi7iGGzdUrOnBeIK6ix3XiBieHIwzLJO5BWjDI3a6l4,7082
@@ -19,7 +19,7 @@ cognite_toolkit/_cdf_tk/apps/_dev_app.py,sha256=q8DBr4BAK33AwsHW3gAWZWSjSaQRuCis
 cognite_toolkit/_cdf_tk/apps/_download_app.py,sha256=Io7mW3DhYpLiDrqJfThSQThHDWrXN9c8huu9zYd0u7E,19408
 cognite_toolkit/_cdf_tk/apps/_dump_app.py,sha256=Ec0aEqbKwCkxni09i06rfY31qZUyOVwbbvo7MHh4cf8,39056
 cognite_toolkit/_cdf_tk/apps/_landing_app.py,sha256=HxzSln3fJXs5NzulfQGUMropXcwMobUYpyePrCrQTQs,1502
-cognite_toolkit/_cdf_tk/apps/_migrate_app.py,sha256=xkXo1h3eQ14D5uQ0qQdq01etS1FV_fg6ZnvxMt8H-os,30580
+cognite_toolkit/_cdf_tk/apps/_migrate_app.py,sha256=JuzY1t5pkW94Dyygr-mT2lheluR7cL8AmD2MfaXhc8Y,37093
 cognite_toolkit/_cdf_tk/apps/_modules_app.py,sha256=95_H2zccRJl2mWn0oQ5mjCaEDnG63sPKOkB81IgWcIk,7637
 cognite_toolkit/_cdf_tk/apps/_profile_app.py,sha256=vSRJW54bEvIul8_4rOqyOYA7ztXx7TFOvZRZWZTxMbg,7007
 cognite_toolkit/_cdf_tk/apps/_purge.py,sha256=e8IgDK2Fib2u30l71Q2trbJ1az90zSLWr5TViTINmL0,15415
@@ -108,7 +108,7 @@ cognite_toolkit/_cdf_tk/commands/_download.py,sha256=OBKPM_HGGA1i32th1SAgkQM_81C
 cognite_toolkit/_cdf_tk/commands/_profile.py,sha256=_4iX3AHAI6eLmRVUlWXCSvVHx1BZW2yDr_i2i9ECg6U,43120
 cognite_toolkit/_cdf_tk/commands/_purge.py,sha256=RadQHsmkPez3fZ5HCP9b82o2_fBx8P_-bTo7prkvWXU,32525
 cognite_toolkit/_cdf_tk/commands/_questionary_style.py,sha256=h-w7fZKkGls3TrzIGBKjsZSGoXJJIYchgD1StfA40r8,806
-cognite_toolkit/_cdf_tk/commands/_upload.py,sha256=axrdDIjW59LxByIwq379tfCEkoBSdq8kVx8ORmsOQxo,14422
+cognite_toolkit/_cdf_tk/commands/_upload.py,sha256=iwLRK53pXP68vmgIGBx7I1ODtlH6bks7_TanHpr04zw,13900
 cognite_toolkit/_cdf_tk/commands/_utils.py,sha256=UxMJW5QYKts4om5n6x2Tq2ihvfO9gWjhQKeqZNFTlKg,402
 cognite_toolkit/_cdf_tk/commands/_virtual_env.py,sha256=GFAid4hplixmj9_HkcXqU5yCLj-fTXm4cloGD6U2swY,2180
 cognite_toolkit/_cdf_tk/commands/auth.py,sha256=PLjfxfJJKaqux1eB2fycIRlwwSMCbM3qxWDnFX-blJU,31720
@@ -127,14 +127,14 @@ cognite_toolkit/_cdf_tk/commands/run.py,sha256=JyX9jLEQej9eRrHVCCNlw4GuF80qETSol
 cognite_toolkit/_cdf_tk/commands/_migrate/__init__.py,sha256=i5ldcTah59K0E4fH5gHTV0GRvtDCEvVses9WQzn9Lno,226
 cognite_toolkit/_cdf_tk/commands/_migrate/canvas.py,sha256=R-z0yfOFcJZj-zRLhN-7z_-SLxqzSmONMgrbzNF9dGs,8843
 cognite_toolkit/_cdf_tk/commands/_migrate/command.py,sha256=jNoqqq81lbdfDTAQ5w2ctaYSUueLhZe0qjUKjCezk6s,14234
-cognite_toolkit/_cdf_tk/commands/_migrate/conversion.py,sha256=_okrcPRaNtM2d8u4Aba1hUYN8CgSTAcDgcugd76aXWA,16775
+cognite_toolkit/_cdf_tk/commands/_migrate/conversion.py,sha256=Ew9JRYrd-Ol9G9csTzpnhXAgCFnX67MwDYOTsdJLP3E,16803
 cognite_toolkit/_cdf_tk/commands/_migrate/creators.py,sha256=FTu7w3G8KyPY8pagG3KdPpOmpLcjehaAg2auEy6iM7A,9605
-cognite_toolkit/_cdf_tk/commands/_migrate/data_classes.py,sha256=vOdvDPVibAbqLQt8hg7pmdHN1Fd6JUoyOZ-fCzfeEH8,9925
-cognite_toolkit/_cdf_tk/commands/_migrate/data_mapper.py,sha256=5gqVRJGa7rh3c9pwPdipxKfl-zXV4mmU0G_h3ehPe2Q,5339
+cognite_toolkit/_cdf_tk/commands/_migrate/data_classes.py,sha256=_vMS_qAPj4yup1VnmmojPVigAZtyPQH7PM0Raby5tao,10619
+cognite_toolkit/_cdf_tk/commands/_migrate/data_mapper.py,sha256=Y7MrE6FGa15uvboBjNyWNlslsBv4FpeP5WsrFsooxsA,5678
 cognite_toolkit/_cdf_tk/commands/_migrate/data_model.py,sha256=i1eUsNX6Dueol9STIEwyksBnBsWUk13O8qHIjW964pM,7860
-cognite_toolkit/_cdf_tk/commands/_migrate/default_mappings.py,sha256=KkSq_4R6hQ15ccG-jHy7vVgPwC5IDd5OaXZLvz5mIZs,5547
+cognite_toolkit/_cdf_tk/commands/_migrate/default_mappings.py,sha256=ERn3qFrJFXdtXaMjHq3Gk7MxH03MGFk3FrtWCOBJQts,5544
 cognite_toolkit/_cdf_tk/commands/_migrate/issues.py,sha256=lWSnuS3CfRDbA7i1g12gJ2reJnQcLmZWxHDK19-Wxkk,5772
-cognite_toolkit/_cdf_tk/commands/_migrate/migration_io.py,sha256=xKjm2OKxfwjQ2DFrrLPFydQhzUjJJ20rDTdaFrkzan8,14372
+cognite_toolkit/_cdf_tk/commands/_migrate/migration_io.py,sha256=wrdBH5P6NgiZQSYLR0iJ3ZvqfQ5fY-_Ne2yKv9E1g4o,16277
 cognite_toolkit/_cdf_tk/commands/_migrate/prepare.py,sha256=RfqaNoso5CyBwc-p6ckwcYqBfZXKhdJgdGIyd0TATaI,2635
 cognite_toolkit/_cdf_tk/commands/_migrate/selectors.py,sha256=N1H_-rBpPUD6pbrlcofn1uEK1bA694EUXEe1zIXeqyo,2489
 cognite_toolkit/_cdf_tk/cruds/__init__.py,sha256=kxiB8gZo0Y4TyttWHGTLPCW5R1DUkN1uTZewTvaZRjo,6298
@@ -147,7 +147,7 @@ cognite_toolkit/_cdf_tk/cruds/_resource_cruds/auth.py,sha256=iGG2_btpEqip3o6OKpc
 cognite_toolkit/_cdf_tk/cruds/_resource_cruds/classic.py,sha256=7RdiWvh6MLI1lLmt3gcqDQj61xbwREhsvoyjFuJn2F0,26402
 cognite_toolkit/_cdf_tk/cruds/_resource_cruds/configuration.py,sha256=KrL7bj8q5q18mGB2V-NDkW5U5nfseZOyorXiUbp2uLw,6100
 cognite_toolkit/_cdf_tk/cruds/_resource_cruds/data_organization.py,sha256=iXn9iAtwA8mhH-7j9GF-MlLomTcaw3GhEbFY28Wx0iA,9927
-cognite_toolkit/_cdf_tk/cruds/_resource_cruds/datamodel.py,sha256=kA9urLWU65fqskvdSpjJEluvuWzUcCVhWCWoho68OJY,65642
+cognite_toolkit/_cdf_tk/cruds/_resource_cruds/datamodel.py,sha256=XaVUjYKfa2ceOicAgE_mPknMdQm_ldwiaNIs3wWjFA0,65823
 cognite_toolkit/_cdf_tk/cruds/_resource_cruds/extraction_pipeline.py,sha256=zv36HPO9goRmU3NM_i1wOvWQEdsgpQTI4bcAl-eis1g,18232
 cognite_toolkit/_cdf_tk/cruds/_resource_cruds/fieldops.py,sha256=SnQMbxiZ3SSYkTLXQ_vIu2HVf_WyD1jplNRJuoeOUfA,16723
 cognite_toolkit/_cdf_tk/cruds/_resource_cruds/file.py,sha256=F3n2FOWAPder4z3OTYs81VB-6C6r3oUzJsHvigdhaD0,15500
@@ -239,12 +239,12 @@ cognite_toolkit/_cdf_tk/resource_classes/robotics/frame.py,sha256=XmDqJ0pAxe_vAP
 cognite_toolkit/_cdf_tk/resource_classes/robotics/location.py,sha256=dbc9HT-bc2Qt15hHoR63SM7pg321BhNuTNjI7HHCwSA,468
 cognite_toolkit/_cdf_tk/resource_classes/robotics/map.py,sha256=j77z7CzCMiMj8r94BdUKCum9EuZRUjaSlUAy9K9DL_Q,942
 cognite_toolkit/_cdf_tk/storageio/__init__.py,sha256=SSMV-W_uqMwS9I0xazBfAyNRqKWlAuLlABropMBEa50,2434
-cognite_toolkit/_cdf_tk/storageio/_annotations.py,sha256=N5z2umaPwBo3OKIyGhNx2geRTBYhAeG3YhCAYm2hyao,4655
+cognite_toolkit/_cdf_tk/storageio/_annotations.py,sha256=JI_g18_Y9S7pbc9gm6dZMyo3Z-bCndJXF9C2lOva0bQ,4848
 cognite_toolkit/_cdf_tk/storageio/_applications.py,sha256=CdqJueM9ZmXVh8RUme2lAgNasjAM8QTQDAfeJMm2ZYo,7026
 cognite_toolkit/_cdf_tk/storageio/_asset_centric.py,sha256=DbTvIneN8Hw3ByhdH1kXkS7Gw68oXEWtIqlZGZgLMg0,33704
-cognite_toolkit/_cdf_tk/storageio/_base.py,sha256=at-5wERENsFRLO1cUOMKW2poVqmkvxSe9JuXpojz4NM,12196
+cognite_toolkit/_cdf_tk/storageio/_base.py,sha256=S52TFdNZuXXkMU_jUobnjW4COwkDwn47FFrKeSv64xs,12120
 cognite_toolkit/_cdf_tk/storageio/_data_classes.py,sha256=s3TH04BJ1q7rXndRhEbVMEnoOXjxrGg4n-w9Z5uUL-o,3480
-cognite_toolkit/_cdf_tk/storageio/_datapoints.py,sha256=nV8jaF5YLvMKhDPU3euf554GvSmfNYkzC9ZvEF7kbP8,8660
+cognite_toolkit/_cdf_tk/storageio/_datapoints.py,sha256=1Cfch0lVPc4dyXH7RGfGaXudB1O3-f56_cfGw-e-ya0,8593
 cognite_toolkit/_cdf_tk/storageio/_instances.py,sha256=t9fNpHnT6kCk8LDoPj3qZXmHpyDbPF5BZ6pI8ziTyFw,10810
 cognite_toolkit/_cdf_tk/storageio/_raw.py,sha256=5WjAFiVR0KKRhMqCy1IRy1TQFWj86D7nGu5WSFNLp6U,3869
 cognite_toolkit/_cdf_tk/storageio/selectors/__init__.py,sha256=kvk7zdI_N2VobkrWTYRDuq1fSpy2Z99MsJp1sBa_KrQ,1715
@@ -285,11 +285,11 @@ cognite_toolkit/_cdf_tk/utils/text.py,sha256=1-LQMo633_hEhNhishQo7Buj-7np5Pe4qKk
 cognite_toolkit/_cdf_tk/utils/thread_safe_dict.py,sha256=NbRHcZvWpF9xHP5OkOMGFpxrPNbi0Q3Eea6PUNbGlt4,3426
 cognite_toolkit/_cdf_tk/utils/useful_types.py,sha256=oK88W6G_aK3hebORSQKZjWrq7jG-pO2lkLWSWYMlngM,1872
 cognite_toolkit/_cdf_tk/utils/validate_access.py,sha256=1puswcpgEDNCwdk91dhLqCBSu_aaUAd3Hsw21d-YVFs,21955
-cognite_toolkit/_cdf_tk/utils/fileio/__init__.py,sha256=ts5kYu_1Ks7xjnM6pIrVUrZe0nkYI6euYXeE4ox34xk,1199
-cognite_toolkit/_cdf_tk/utils/fileio/_base.py,sha256=MpWaD3lR9vrJ-kGzTiDOtChXhvFD7-xrP-Pzp7vjnLY,756
+cognite_toolkit/_cdf_tk/utils/fileio/__init__.py,sha256=0rJsL3jClj_smxh_Omqchf0K9xTi1DlKgmCDjBqJ38I,1243
+cognite_toolkit/_cdf_tk/utils/fileio/_base.py,sha256=eC6mRIwSD4LjyFa83BoBnhO0t3l-ctQMW295LIyxXLk,827
 cognite_toolkit/_cdf_tk/utils/fileio/_compression.py,sha256=8BAPgg5OKc3vkEEkqOvYsuyh12iXVNuEmC0omWwyJNQ,2355
-cognite_toolkit/_cdf_tk/utils/fileio/_readers.py,sha256=mBf0-8JFwLfyTGJH8nWpbn89VPTj9UwP3GmZtx8t3A4,13969
-cognite_toolkit/_cdf_tk/utils/fileio/_writers.py,sha256=T9Owx1XQ2bN1voBBLs7hUpTXihCiXgHwEhpIv_XB9xc,17732
+cognite_toolkit/_cdf_tk/utils/fileio/_readers.py,sha256=plDxxRVipcddjhF8JOaVoDFeNFUmtl4fKMX8FLAU_eI,15527
+cognite_toolkit/_cdf_tk/utils/fileio/_writers.py,sha256=4buAPp73Qfc0hw_LMyFI3g2DhdM4hbrasXuwMCiAcCQ,17732
 cognite_toolkit/_cdf_tk/utils/http_client/__init__.py,sha256=G8b7Bg4yIet5R4Igh3dS2SntWzE6I0iTGBeNlNsSxkQ,857
 cognite_toolkit/_cdf_tk/utils/http_client/_client.py,sha256=NTRfloXkCiS_rl5Vl1D_hsyTTowMKWDsiIR4oGwTADI,11208
 cognite_toolkit/_cdf_tk/utils/http_client/_data_classes.py,sha256=PIlSmv3spObHeoylpSzz2fqUFAjIoE89qzvc0uSOGw0,12975
@@ -300,13 +300,13 @@ cognite_toolkit/_repo_files/.gitignore,sha256=ip9kf9tcC5OguF4YF4JFEApnKYw0nG0vPi
 cognite_toolkit/_repo_files/AzureDevOps/.devops/README.md,sha256=OLA0D7yCX2tACpzvkA0IfkgQ4_swSd-OlJ1tYcTBpsA,240
 cognite_toolkit/_repo_files/AzureDevOps/.devops/deploy-pipeline.yml,sha256=brULcs8joAeBC_w_aoWjDDUHs3JheLMIR9ajPUK96nc,693
 cognite_toolkit/_repo_files/AzureDevOps/.devops/dry-run-pipeline.yml,sha256=OBFDhFWK1mlT4Dc6mDUE2Es834l8sAlYG50-5RxRtHk,723
-cognite_toolkit/_repo_files/GitHub/.github/workflows/deploy.yaml,sha256=IwBGesItWryS0xfmEeOGZAePQ4mxHSR6CmBQ7n9i-mY,668
-cognite_toolkit/_repo_files/GitHub/.github/workflows/dry-run.yaml,sha256=8Pg9SJbe4VbVP8EnGKE1hNds9TfSIGptB87N0X8xeNU,2431
-cognite_toolkit/_resources/cdf.toml,sha256=ATCi6pyPK0FQOsAgiYUaNz2YK2nyKdaeKVW1FOt1rGc,488
+cognite_toolkit/_repo_files/GitHub/.github/workflows/deploy.yaml,sha256=7ijoHGa5OFuDD3hYSllkChA4MU_iJgAnXmUX4sTlDkg,668
+cognite_toolkit/_repo_files/GitHub/.github/workflows/dry-run.yaml,sha256=5yngla4gklkissfZ1vVMphWbfMfYmFs2gt3HnckqJrI,2431
+cognite_toolkit/_resources/cdf.toml,sha256=CkbioMspsFNI5FwfptPthjPiQiln-5d7B6Tm1eWW_YY,488
 cognite_toolkit/demo/__init__.py,sha256=-m1JoUiwRhNCL18eJ6t7fZOL7RPfowhCuqhYFtLgrss,72
 cognite_toolkit/demo/_base.py,sha256=6xKBUQpXZXGQ3fJ5f7nj7oT0s2n7OTAGIa17ZlKHZ5U,8052
-cognite_toolkit-0.6.105.dist-info/METADATA,sha256=4MHffqZKT4SsNX4s4tz-bbPUm1wGxbGvrq93n4ul1Sw,4502
-cognite_toolkit-0.6.105.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-cognite_toolkit-0.6.105.dist-info/entry_points.txt,sha256=JlR7MH1_UMogC3QOyN4-1l36VbrCX9xUdQoHGkuJ6-4,83
-cognite_toolkit-0.6.105.dist-info/licenses/LICENSE,sha256=CW0DRcx5tL-pCxLEN7ts2S9g2sLRAsWgHVEX4SN9_Mc,752
-cognite_toolkit-0.6.105.dist-info/RECORD,,
+cognite_toolkit-0.6.107.dist-info/METADATA,sha256=jKo4OnTtq7P4OwIRv8ku2Exp5ZlhakpRrUtmHwbYWKM,4502
+cognite_toolkit-0.6.107.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+cognite_toolkit-0.6.107.dist-info/entry_points.txt,sha256=JlR7MH1_UMogC3QOyN4-1l36VbrCX9xUdQoHGkuJ6-4,83
+cognite_toolkit-0.6.107.dist-info/licenses/LICENSE,sha256=CW0DRcx5tL-pCxLEN7ts2S9g2sLRAsWgHVEX4SN9_Mc,752
+cognite_toolkit-0.6.107.dist-info/RECORD,,