cognite-toolkit 0.6.84__py3-none-any.whl → 0.6.86__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cognite-toolkit might be problematic.

@@ -67,7 +67,14 @@ from cognite_toolkit._cdf_tk.utils.useful_types import (
  T_WritableCogniteResourceList,
  )
 
- from ._base import ConfigurableStorageIO, Page, StorageIOConfig, TableStorageIO, UploadableStorageIO, UploadItem
+ from ._base import (
+ ConfigurableStorageIO,
+ Page,
+ StorageIOConfig,
+ TableStorageIO,
+ UploadableStorageIO,
+ UploadItem,
+ )
  from .selectors import AssetCentricSelector, AssetSubtreeSelector, DataSetSelector
 
 
@@ -179,6 +186,19 @@ class BaseAssetCentricIO(
  asset_ids.update(item.asset_ids or [])
  self.client.lookup.assets.external_id(list(asset_ids))
 
+ def data_to_row(
+ self, data_chunk: Sequence[T_WritableCogniteResource], selector: AssetCentricSelector | None = None
+ ) -> list[dict[str, JsonVal]]:
+ rows: list[dict[str, JsonVal]] = []
+ for chunk in self.data_to_json_chunk(data_chunk, selector):
+ if "metadata" in chunk and isinstance(chunk["metadata"], dict):
+ metadata = chunk.pop("metadata")
+ # MyPy does understand that metadata is a dict here due to the check above.
+ for key, value in metadata.items(): # type: ignore[union-attr]
+ chunk[f"metadata.{key}"] = value
+ rows.append(chunk)
+ return rows
+
 
  class AssetIO(BaseAssetCentricIO[str, AssetWrite, Asset, AssetWriteList, AssetList]):
  KIND = "Assets"
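For context, the data_to_row method added above flattens each resource's nested metadata dictionary into dotted top-level keys, so row-based sinks (CSV files or RAW tables, for example) receive one flat dictionary per resource. A minimal, self-contained sketch of the same transformation, independent of the toolkit's classes:

from typing import Any

def flatten_metadata(chunk: dict[str, Any]) -> dict[str, Any]:
    """Move nested 'metadata' entries up to dotted 'metadata.<key>' columns."""
    row = dict(chunk)  # copy so the input chunk is left untouched
    metadata = row.pop("metadata", None)
    if isinstance(metadata, dict):
        for key, value in metadata.items():
            row[f"metadata.{key}"] = value
    return row

# {"externalId": "pump-01", "metadata": {"site": "A"}}
# becomes {"externalId": "pump-01", "metadata.site": "A"}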
@@ -230,3 +230,19 @@ class TableStorageIO(StorageIO[T_Selector, T_CogniteResource], ABC):
 
  """
  raise NotImplementedError()
+
+ @abstractmethod
+ def data_to_row(
+ self, data_chunk: Sequence[T_CogniteResource], selector: T_Selector | None = None
+ ) -> list[dict[str, JsonVal]]:
+ """Convert a chunk of data to a row-based JSON-compatible format.
+
+ Args:
+ data_chunk: The chunk of data to convert, which should be a writable Cognite resource list.
+ selector: Optional selection criteria to identify the data. This is required for some storage types.
+
+ Returns:
+ A list of dictionaries representing the data in a JSON-compatible format.
+
+ """
+ raise NotImplementedError()
@@ -106,6 +106,15 @@ class AssetCentricAggregator(ABC):
  seen.add(int_id)
  return ids
 
+ def _to_dataset_id(self, data_set_external_id: str | list[str] | None) -> list[int] | None:
+ """Converts data set external IDs to data set IDs."""
+ dataset_id: list[int] | None = None
+ if data_set_external_id is not None:
+ if isinstance(data_set_external_id, str):
+ data_set_external_id = [data_set_external_id]
+ dataset_id = self.client.lookup.data_sets.id(data_set_external_id, allow_empty=False)
+ return dataset_id
+
 
  class MetadataAggregator(AssetCentricAggregator, ABC, Generic[T_CogniteFilter]):
  filter_cls: type[T_CogniteFilter]
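The _to_dataset_id helper added above normalizes the optional filter before resolving it: None is passed through untouched, a bare string is wrapped in a list, and the list is then resolved to internal IDs via the client's data set lookup. A hedged usage sketch, assuming agg is any concrete aggregator built from a ToolkitClient and the external IDs are illustrative:

# None short-circuits: no lookup is performed.
assert agg._to_dataset_id(None) is None

# A bare string is wrapped in a list before the lookup resolves it to internal IDs.
single = agg._to_dataset_id("ingestion_data_set")   # e.g. [123]

# Lists are resolved element-wise via client.lookup.data_sets.id(..., allow_empty=False).
several = agg._to_dataset_id(["ds_a", "ds_b"])       # e.g. [123, 456]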
@@ -386,9 +395,10 @@ class RelationshipAggregator(AssetCentricAggregator):
  def count(
  self, hierarchy: str | list[str] | None = None, data_set_external_id: str | list[str] | None = None
  ) -> int:
- if hierarchy is not None or data_set_external_id is not None:
+ if hierarchy is not None:
  raise NotImplementedError()
- results = relationship_aggregate_count(self.client)
+ dataset_id = self._to_dataset_id(data_set_external_id)
+ results = relationship_aggregate_count(self.client, dataset_id)
  return sum(result.count for result in results)
 
  def used_data_sets(self, hierarchy: str | None = None) -> list[str]:
@@ -405,9 +415,10 @@ class LabelCountAggregator(AssetCentricAggregator):
  def count(
  self, hierarchy: str | list[str] | None = None, data_set_external_id: str | list[str] | None = None
  ) -> int:
- if hierarchy is not None or data_set_external_id is not None:
+ if hierarchy is not None:
  raise NotImplementedError()
- return label_aggregate_count(self.client)
+ data_set_id = self._to_dataset_id(data_set_external_id)
+ return label_aggregate_count(self.client, data_set_id)
 
  def used_data_sets(self, hierarchy: str | None = None) -> list[str]:
  raise NotImplementedError()
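With these two hunks, the relationship and label aggregators accept a data set filter (resolved through _to_dataset_id) while hierarchy filtering still raises NotImplementedError. A hedged usage sketch; the aggregator instances and external IDs are illustrative:

# Counts scoped to specific data sets.
relationship_total = relationship_aggregator.count(data_set_external_id="ingestion_data_set")
label_total = label_aggregator.count(data_set_external_id=["ds_a", "ds_b"])

# Hierarchy filtering remains unsupported for these two aggregators:
# relationship_aggregator.count(hierarchy="root_asset")  # raises NotImplementedError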
@@ -235,7 +235,7 @@ class ProducerWorkerExecutor(Generic[T_Download, T_Processed]):
  break
  except Exception as e:
  self._error_event.set()
- self.error_message = str(e)
+ self.error_message = f"{type(e).__name__} {e!s}"
  self.error_traceback = traceback.format_exc()
  self.console.print(f"[red]Error[/red] occurred while {self.download_description}: {self.error_message}")
  break
@@ -275,7 +275,7 @@ class ProducerWorkerExecutor(Generic[T_Download, T_Processed]):
  continue
  except Exception as e:
  self._error_event.set()
- self.error_message = str(e)
+ self.error_message = f"{type(e).__name__} {e!s}"
  self.error_traceback = traceback.format_exc()
  self.console.print(f"[red]Error[/red] occurred while {self.process_description}: {self.error_message}")
  break
@@ -297,7 +297,7 @@ class ProducerWorkerExecutor(Generic[T_Download, T_Processed]):
  continue
  except Exception as e:
  self._error_event.set()
- self.error_message = str(e)
+ self.error_message = f"{type(e).__name__} {e!s}"
  self.error_traceback = traceback.format_exc()
  self.console.print(f"[red]Error[/red] occurred while {self.write_description}: {self.error_message}")
  break
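Prefixing the exception class name makes otherwise terse messages (a bare KeyError, for example) identifiable in the executor's console output. A small self-contained illustration of the difference:

try:
    {}["missing"]
except Exception as e:
    old_style = str(e)                        # "'missing'" - gives no hint of the exception type
    new_style = f"{type(e).__name__} {e!s}"   # "KeyError 'missing'"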
@@ -1,19 +1,32 @@
  from collections.abc import Sequence
- from typing import Literal
+ from typing import Literal, TypeAlias, overload
 
  from cognite.client.data_classes.capabilities import (
+ AllScope,
  AssetsAcl,
  Capability,
  DataModelInstancesAcl,
  DataModelsAcl,
+ DataSetScope,
+ EventsAcl,
+ ExtractionPipelinesAcl,
  FilesAcl,
+ LabelsAcl,
+ RelationshipsAcl,
+ SequencesAcl,
+ ThreeDAcl,
  TimeSeriesAcl,
+ TransformationsAcl,
+ WorkflowOrchestrationAcl,
  )
 
  from cognite_toolkit._cdf_tk.client import ToolkitClient
  from cognite_toolkit._cdf_tk.exceptions import AuthorizationError
+ from cognite_toolkit._cdf_tk.tk_warnings import HighSeverityWarning
  from cognite_toolkit._cdf_tk.utils import humanize_collection
 
+ Action: TypeAlias = Literal["read", "write"]
+
 
  class ValidateAccess:
  def __init__(self, client: ToolkitClient, default_operation: str) -> None:
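The module-level Action alias replaces the Literal["read", "write"] annotation that was previously repeated in every signature; the later hunks in this file swap each occurrence for Sequence[Action]. A small self-contained sketch of the pattern (the function name is illustrative):

from collections.abc import Sequence
from typing import Literal, TypeAlias

Action: TypeAlias = Literal["read", "write"]

def validate(action: Sequence[Action]) -> None:
    # Identical to Sequence[Literal["read", "write"]], but the alias keeps the
    # many method signatures in this module short and consistent.
    ...

validate(["read", "write"])  # accepted by a type checker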
@@ -21,12 +34,12 @@ class ValidateAccess:
  self.default_operation = default_operation
 
  def data_model(
- self, action: Sequence[Literal["read", "write"]], spaces: set[str] | None = None, operation: str | None = None
+ self, action: Sequence[Action], spaces: set[str] | None = None, operation: str | None = None
  ) -> list[str] | None:
  """Validate access to data models.
 
  Args:
- action (Sequence[Literal["read", "write"]]): The actions to validate access for.
+ action (Sequence[Action]): The actions to validate access for.
  spaces (Set[str] | None): The space IDs to check access for. If None, checks access for all spaces.
  operation (str | None): The operation being performed, used for error messages.
 
@@ -38,7 +51,7 @@
  AuthorizationError: If the user does not have permission to perform the specified action on the given space.
  """
  operation = operation or self.default_operation
- model_scopes, actions_str = self._set_up_read_write(
+ model_scopes = self._get_scopes(
  action, DataModelsAcl.Action.Read, DataModelsAcl.Action.Write, operation, "data models"
  )
  if len(model_scopes) != 1:
@@ -51,19 +64,20 @@
  return model_scope.space_ids
  if missing := spaces - set(model_scope.space_ids):
  raise AuthorizationError(
- f"You have no permission to {actions_str} the {humanize_collection(missing)!r} space(s). This is required to {operation}."
+ f"You have no permission to {humanize_collection(action)} the {humanize_collection(missing)!r} "
+ f"space(s). This is required to {operation}."
  )
  return None
  else:
  raise ValueError(f"Unexpected data model scope type: {type(model_scope)}. Expected SpaceID or All.")
 
  def instances(
- self, action: Sequence[Literal["read", "write"]], spaces: set[str] | None = None, operation: str | None = None
+ self, action: Sequence[Action], spaces: set[str] | None = None, operation: str | None = None
  ) -> list[str] | None:
  """Validate access to data model instances.
 
  Args:
- action (Sequence[Literal["read", "write"]]): The actions to validate access for.
+ action (Sequence[Action]): The actions to validate access for.
  spaces (Set[str] | None): The space IDs to check access for. If None, checks access for all spaces.
  operation (str | None): The operation being performed, used for error messages.
 
@@ -75,7 +89,7 @@
  AuthorizationError: If the user does not have permission to perform the specified action on the given space.
  """
  operation = operation or self.default_operation
- instance_scopes, action_str = self._set_up_read_write(
+ instance_scopes = self._get_scopes(
  action, DataModelInstancesAcl.Action.Read, DataModelInstancesAcl.Action.Write, operation, "instances"
  )
  if len(instance_scopes) != 1:
@@ -88,7 +102,8 @@
  return instance_scope.space_ids
  if missing := spaces - set(instance_scope.space_ids):
  raise AuthorizationError(
- f"You have no permission to {action_str} instances in the {humanize_collection(missing)!r} space(s). This is required to {operation} instances."
+ f"You have no permission to {humanize_collection(action)} instances in the "
+ f"{humanize_collection(missing)!r} space(s). This is required to {operation} instances."
  )
  return None
  elif isinstance(instance_scope, DataModelInstancesAcl.Scope.All):
@@ -98,12 +113,37 @@
  f"Unexpected data model instance scope type: {type(instance_scope)}. Expected SpaceID or All."
  )
 
+ @overload
+ def dataset_data(
+ self,
+ action: Sequence[Action],
+ dataset_ids: set[int],
+ operation: str | None = None,
+ missing_access: Literal["raise", "warn"] = "raise",
+ ) -> None: ...
+
+ @overload
  def dataset_data(
  self,
- action: Sequence[Literal["read", "write"]],
+ action: Sequence[Action],
+ dataset_ids: None = None,
+ operation: str | None = None,
+ missing_access: Literal["raise", "warn"] = "raise",
+ ) -> (
+ dict[Literal["assets", "events", "time series", "files", "relationships", "labels", "3D models"], list[int]]
+ | None
+ ): ...
+
+ def dataset_data(
+ self,
+ action: Sequence[Action],
  dataset_ids: set[int] | None = None,
  operation: str | None = None,
- ) -> list[int] | None:
+ missing_access: Literal["raise", "warn"] = "raise",
+ ) -> (
+ dict[Literal["assets", "events", "time series", "files", "relationships", "labels", "3D models"], list[int]]
+ | None
+ ):
  """Validate access to dataset data.
 
  Dataset data resources are:
@@ -117,23 +157,69 @@
  - 3D models
 
  Args:
- action (Sequence[Literal["read", "write"]]): The actions to validate access for
+ action (Sequence[Action]): The actions to validate access for
  dataset_ids (Set[int] | None): The dataset IDs to check access for. If None, checks access for all datasets.
- operation (str | None): The operation being performed, used for error messages.
+ operation (str | None): The operation being performed, used for error and warning messages.
+ missing_access (Literal["raise", "warn"]): Whether to raise an error or warn when access is missing for specified datasets.
+
  Returns:
- list[int] | None: Returns a list of dataset IDs if access is limited to these datasets, or None if access is granted to all datasets.
+ dict[
+ Literal["assets", "events", "time series", "files", "relationships", "labels", "3D models"], list[int]
+ ] | None:
+ If dataset_ids is None, returns a dictionary with keys as dataset data resource names and values as lists of dataset IDs the user has access to.
+ If dataset_ids is provided, returns None if the user has access to all specified datasets for all dataset data resources.
  Raises:
  ValueError: If the client.token.get_scope() returns an unexpected dataset data scope type.
  AuthorizationError: If the user does not have permission to perform the specified action on the given dataset.
  """
- raise NotImplementedError()
+ acls: list[tuple[str, list[Capability.Action], list[Capability.Action]]] = [
+ ("assets", [AssetsAcl.Action.Read], [AssetsAcl.Action.Write]),
+ ("events", [EventsAcl.Action.Read], [EventsAcl.Action.Write]),
+ ("time series", [TimeSeriesAcl.Action.Read], [TimeSeriesAcl.Action.Write]),
+ ("files", [FilesAcl.Action.Read], [FilesAcl.Action.Write]),
+ ("sequences", [SequencesAcl.Action.Read], [SequencesAcl.Action.Write]),
+ ("relationships", [RelationshipsAcl.Action.Read], [RelationshipsAcl.Action.Write]),
+ ("labels", [LabelsAcl.Action.Read], [LabelsAcl.Action.Write]),
+ (
+ "3D models",
+ [ThreeDAcl.Action.Read],
+ [ThreeDAcl.Action.Create, ThreeDAcl.Action.Update, ThreeDAcl.Action.Delete],
+ ),
+ ]
+ # MyPy does not understand that with the acl above, we get the correct return value.
+ return self._dataset_access_check( # type: ignore[return-value]
+ action,
+ dataset_ids=dataset_ids,
+ operation=operation,
+ acls=acls,
+ missing_access=missing_access,
+ )
 
+ @overload
  def dataset_configurations(
  self,
- action: Sequence[Literal["read", "write"]],
+ action: Sequence[Action],
+ dataset_ids: set[int],
+ operation: str | None = None,
+ missing_access: Literal["raise", "warn"] = "raise",
+ ) -> None: ...
+
+ @overload
+ def dataset_configurations(
+ self,
+ action: Sequence[Action],
+ dataset_ids: None = None,
+ operation: str | None = None,
+ missing_access: Literal["raise", "warn"] = "raise",
+ ) -> dict[Literal["transformations", "workflows", "extraction pipelines"], list[int]] | None: ...
+
+ def dataset_configurations(
+ self,
+ action: Sequence[Action],
  dataset_ids: set[int] | None = None,
  operation: str | None = None,
- ) -> list[int] | None:
+ missing_access: Literal["raise", "warn"] = "raise",
+ ) -> dict[Literal["transformations", "workflows", "extraction pipelines"], list[int]] | None:
  """Validate access configuration resources.
 
  Configuration resources are:
@@ -142,26 +228,92 @@
  - Extraction pipelines
 
  Args:
- action (Sequence[Literal["read", "write"]]): The actions to validate access for
+ action (Sequence[Action]): The actions to validate access for
  dataset_ids (Set[int] | None): The dataset IDs to check access for. If None, checks access for all datasets.
- operation (str | None): The operation being performed, used for error messages.
+ operation (str | None): The operation being performed, used for error and warning messages.
+ missing_access (Literal["raise", "warn"]): Whether to raise an error or warn when access is missing for specified datasets.
+
  Returns:
- list[int] | None: Returns a list of dataset IDs if access is limited to these datasets, or None if access is granted to all datasets.
+ dict[Literal["transformations", "workflows", "extraction pipelines"], list[int] | None]:
+ If dataset_ids is None, returns a dictionary with keys as configuration resource names and values as lists of dataset IDs the user has access to.
+ If dataset_ids is provided, returns None if the user has access to all specified datasets for all configuration resources.
+
  Raises:
  ValueError: If the client.token.get_scope() returns an unexpected dataset configuration scope type.
  AuthorizationError: If the user does not have permission to perform the specified action on the given dataset.
  """
- raise NotImplementedError()
+ acls: list[tuple[str, list[Capability.Action], list[Capability.Action]]] = [
+ ("transformations", [TransformationsAcl.Action.Read], [TransformationsAcl.Action.Write]),
+ ("workflows", [WorkflowOrchestrationAcl.Action.Read], [WorkflowOrchestrationAcl.Action.Write]),
+ ("extraction pipelines", [ExtractionPipelinesAcl.Action.Read], [ExtractionPipelinesAcl.Action.Write]),
+ ]
+ # MyPy does not understand that with the acl above, we get the correct return value.
+ return self._dataset_access_check( # type: ignore[return-value]
+ action,
+ dataset_ids=dataset_ids,
+ operation=operation,
+ acls=acls,
+ missing_access=missing_access,
+ )
+
+ def _dataset_access_check(
+ self,
+ action: Sequence[Action],
+ dataset_ids: set[int] | None,
+ operation: str | None,
+ missing_access: Literal["raise", "warn"],
+ acls: Sequence[tuple[str, list[Capability.Action], list[Capability.Action]]],
+ ) -> dict[str, list[int]] | None:
+ need_access_to = set(dataset_ids) if dataset_ids is not None else None
+ no_access: list[str] = []
+ output: dict[str, list[int]] = {}
+ for name, read_actions, write_actions in acls:
+ actions = [
+ acl_action for word in action for acl_action in {"read": read_actions, "write": write_actions}[word]
+ ]
+ scopes = self.client.token.get_scope(actions)
+ if scopes is None:
+ no_access.append(name)
+ continue
+ # First check for 'all' scope
+ for scope in scopes:
+ if isinstance(scope, AllScope):
+ break
+ else:
+ # No 'all' scope found, check dataset scopes
+ for scope in scopes:
+ if isinstance(scope, DataSetScope):
+ if need_access_to is None:
+ output[name] = scope.ids
+ break
+ missing_data_set = need_access_to - set(scope.ids)
+ if missing_data_set:
+ no_access.append(name)
+ break
+ operation = operation or self.default_operation
+ if no_access:
+ message = f"You have no permission to {humanize_collection(action)} {humanize_collection(no_access)}."
+ if dataset_ids:
+ dataset_external_ids = self.client.lookup.data_sets.external_id(list(dataset_ids))
+ plural = "s" if len(dataset_external_ids) > 1 else ""
+ message = f"{message[:-1]} on dataset{plural} {humanize_collection(dataset_external_ids)}."
+ if missing_access == "raise":
+ raise AuthorizationError(f"{message} This is required to {operation}.")
+ else:
+ HighSeverityWarning(f"{message}. You will have limited functionality to {operation}.").print_warning()
+ elif dataset_ids is not None:
+ return None
+ return output or None
 
  def timeseries(
  self,
- action: Sequence[Literal["read", "write"]],
+ action: Sequence[Action],
  dataset_ids: set[int] | None = None,
  operation: str | None = None,
  ) -> dict[str, list[str]] | None:
  """Validate access to time series.
  Args:
- action (Sequence[Literal["read", "write"]]): The actions to validate access for.
+ action (Sequence[Action]): The actions to validate access for.
  dataset_ids (Set[int] | None): The dataset IDs to check access for. If None, checks access for all datasets.
  operation (str | None): The operation being performed, used for error messages.
  Returns:
@@ -171,7 +323,7 @@ class ValidateAccess:
  AuthorizationError: If the user does not have permission to perform the specified action on the given dataset or time series.
  """
  operation = operation or self.default_operation
- timeseries_scopes, actions_str = self._set_up_read_write(
+ timeseries_scopes = self._get_scopes(
  action, TimeSeriesAcl.Action.Read, TimeSeriesAcl.Action.Write, operation, "time series"
  )
 
@@ -185,7 +337,7 @@
  if not missing:
  return None
  raise AuthorizationError(
- f"You have no permission to {actions_str} time series in dataset {humanize_collection(missing)}. This is required to {operation}."
+ f"You have no permission to {humanize_collection(action)} time series in dataset {humanize_collection(missing)}. This is required to {operation}."
  )
  output: dict[str, list[str]] = {}
  for scope in timeseries_scopes:
@@ -204,14 +356,14 @@
 
  def files(
  self,
- action: Sequence[Literal["read", "write"]],
+ action: Sequence[Action],
  dataset_ids: set[int] | None = None,
  operation: str | None = None,
  ) -> dict[str, list[str]] | None:
  """Validate access to files.
 
  Args:
- action (Sequence[Literal["read", "write"]]): The actions to validate access for
+ action (Sequence[Action]): The actions to validate access for
  dataset_ids (Set[int] | None): The dataset IDs to check access for. If None, checks access for all datasets.
  operation (str | None): The operation being performed, used for error messages.
  Returns:
@@ -222,9 +374,7 @@
  dataset.
  """
  operation = operation or self.default_operation
- file_scopes, actions_str = self._set_up_read_write(
- action, FilesAcl.Action.Read, FilesAcl.Action.Write, operation, "files"
- )
+ file_scopes = self._get_scopes(action, FilesAcl.Action.Read, FilesAcl.Action.Write, operation, "files")
  if isinstance(file_scopes[0], FilesAcl.Scope.All):
  return None
  if dataset_ids is not None:
@@ -235,7 +385,8 @@
  if not missing:
  return None
  raise AuthorizationError(
- f"You have no permission to {actions_str} files in dataset {humanize_collection(missing)}. This is required to {operation}."
+ f"You have no permission to {humanize_collection(action)} files in dataset "
+ f"{humanize_collection(missing)}. This is required to {operation}."
  )
  output: dict[str, list[str]] = {}
  for scope in file_scopes:
@@ -247,13 +398,13 @@
 
  def assets(
  self,
- action: Sequence[Literal["read", "write"]],
+ action: Sequence[Action],
  dataset_ids: set[int] | None = None,
  operation: str | None = None,
  ) -> dict[str, list[str]] | None:
  """Validate access to assets.
  Args:
- action (Sequence[Literal["read", "write"]]): The actions to validate access for
+ action (Sequence[Action]): The actions to validate access for
  dataset_ids (Set[int] | None): The dataset IDs to check access for. If None, checks access for all datasets.
  operation (str | None): The operation being performed, used for error messages.
  Returns:
@@ -265,9 +416,7 @@
  dataset.
  """
  operation = operation or self.default_operation
- asset_scopes, actions_str = self._set_up_read_write(
- action, AssetsAcl.Action.Read, AssetsAcl.Action.Write, operation, "assets"
- )
+ asset_scopes = self._get_scopes(action, AssetsAcl.Action.Read, AssetsAcl.Action.Write, operation, "assets")
  if isinstance(asset_scopes[0], AssetsAcl.Scope.All):
  return None
  if dataset_ids is not None:
@@ -278,7 +427,8 @@
  if not missing:
  return None
  raise AuthorizationError(
- f"You have no permission to {actions_str} assets in dataset(s) {humanize_collection(missing)}. This is required to {operation}."
+ f"You have no permission to {humanize_collection(action)} assets in dataset(s) "
+ f"{humanize_collection(missing)}. This is required to {operation}."
  )
  output: dict[str, list[str]] = {}
  for scope in asset_scopes:
@@ -288,19 +438,31 @@
  raise ValueError(f"Unexpected asset scope type: {type(scope)}. Expected DataSet or All.")
  return output
 
- def _set_up_read_write(
+ def _get_scopes(
  self,
- action: Sequence[Literal["read", "write"]],
+ action: Sequence[Action],
  read: Capability.Action,
  write: Capability.Action,
  operation: str,
  name: str,
- ) -> tuple[list[Capability.Scope], str]:
- actions_str = humanize_collection(action, bind_word="and")
+ ) -> list[Capability.Scope]:
+ """Helper method to get scopes for the given action.
+
+ Args:
+ action (Sequence[Action]): The actions to validate access for.
+ read (Capability.Action): The read action.
+ write (Capability.Action): The write action.
+ operation (str): The operation being performed, used for error messages.
+ name (str): The name of the resource being accessed, used for error messages.
+
+ Returns:
+ list[Capability.Scope]: The scopes for the given action.
+ """
  actions = [{"read": read, "write": write}[a] for a in action]
  scopes = self.client.token.get_scope(actions)
  if scopes is None:
  raise AuthorizationError(
- f"You have no permission to {actions_str} {name}. This is required to {operation}."
+ f"You have no permission to {humanize_collection(action, bind_word='and')} {name}. "
+ f"This is required to {operation}."
  )
- return scopes, actions_str
+ return scopes
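Taken together, the overloads give dataset_data and dataset_configurations two call shapes: with explicit dataset IDs they return None when access is granted (raising or warning otherwise, per missing_access), and without IDs they report, per resource type, which data sets the ACLs are scoped to. A hedged usage sketch; the client, operation text, and IDs are illustrative:

validator = ValidateAccess(client, default_operation="dump asset-centric data")

# Explicit IDs: returns None; missing_access="warn" downgrades the
# AuthorizationError to a HighSeverityWarning instead of raising.
validator.dataset_data(["read"], dataset_ids={123, 456}, missing_access="warn")

# No IDs: reports which data sets each ACL is scoped to, for example
# {"assets": [123], "files": [123, 456]}, or None when every ACL has all scope.
scoped = validator.dataset_data(["read", "write"])

# dataset_configurations behaves the same way for transformations, workflows,
# and extraction pipelines.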
@@ -12,7 +12,7 @@ jobs:
  environment: dev
  name: Deploy
  container:
- image: cognite/toolkit:0.6.84
+ image: cognite/toolkit:0.6.86
  env:
  CDF_CLUSTER: ${{ vars.CDF_CLUSTER }}
  CDF_PROJECT: ${{ vars.CDF_PROJECT }}
@@ -10,7 +10,7 @@ jobs:
  environment: dev
  name: Deploy Dry Run
  container:
- image: cognite/toolkit:0.6.84
+ image: cognite/toolkit:0.6.86
  env:
  CDF_CLUSTER: ${{ vars.CDF_CLUSTER }}
  CDF_PROJECT: ${{ vars.CDF_PROJECT }}
@@ -4,7 +4,7 @@ default_env = "<DEFAULT_ENV_PLACEHOLDER>"
  [modules]
  # This is the version of the modules. It should not be changed manually.
  # It will be updated by the 'cdf modules upgrade' command.
- version = "0.6.84"
+ version = "0.6.86"
 
  [alpha_flags]
  external-libraries = true
@@ -1 +1 @@
- __version__ = "0.6.84"
+ __version__ = "0.6.86"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: cognite_toolkit
- Version: 0.6.84
+ Version: 0.6.86
  Summary: Official Cognite Data Fusion tool for project templates and configuration deployment
  Project-URL: Homepage, https://docs.cognite.com/cdf/deploy/cdf_toolkit/
  Project-URL: Changelog, https://github.com/cognitedata/toolkit/releases