dapla-toolbelt-metadata 0.9.1__py3-none-any.whl → 0.9.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dapla-toolbelt-metadata might be problematic.

@@ -1,6 +1,6 @@
1
1
  """Document dataset."""
2
2
 
3
- from datadoc_model import model
3
+ from datadoc_model.all_optional import model
4
4
 
5
5
  from .core import Datadoc
6
6
  from .dapla_dataset_path_info import DaplaDatasetPathInfo
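A minimal sketch of the corresponding change in downstream code, assuming it imported the model module directly; the Dataset construction is illustrative:

    # Old import (0.9.1):
    # from datadoc_model import model

    # New import (0.9.3): the all-optional variant of the model is used.
    from datadoc_model.all_optional import model

    dataset = model.Dataset(short_name="person_data_v1")  # every field is optional in this variant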
@@ -0,0 +1,333 @@
1
+ """Code relating to merging metadata from an existing metadata document and metadata extracted from a new dataset.
2
+
3
+ This is primarily convenience functionality for users whereby they can programmatically generate metadata without
4
+ having to manually enter it. This is primarily useful when data is sharded by time (i.e. each dataset applies for
5
+ a particular period like a month or a year). Assuming there aren't structural changes, the metadata may be reused
6
+ for all periods.
7
+
8
+ It is important to be able to detect changes in the structure of the data and warn users about this so that they can
9
+ make changes as appropriate.
10
+ """
11
+
12
+ import copy
13
+ import logging
14
+ import warnings
15
+ from collections.abc import Iterable
16
+ from dataclasses import dataclass
17
+ from dataclasses import field
18
+ from pathlib import Path
19
+ from typing import cast
20
+
21
+ import datadoc_model
22
+ import datadoc_model.all_optional.model as all_optional_model
23
+ import datadoc_model.required.model as required_model
24
+ from cloudpathlib import CloudPath
25
+
26
+ from dapla_metadata.datasets.dapla_dataset_path_info import DaplaDatasetPathInfo
27
+ from dapla_metadata.datasets.utility.constants import (
28
+ DATASET_FIELDS_FROM_EXISTING_METADATA,
29
+ )
30
+ from dapla_metadata.datasets.utility.constants import INCONSISTENCIES_MESSAGE
31
+ from dapla_metadata.datasets.utility.utils import OptionalDatadocMetadataType
32
+ from dapla_metadata.datasets.utility.utils import VariableListType
33
+
34
+ logger = logging.getLogger(__name__)
35
+
36
+ BUCKET_NAME_MESSAGE = "Bucket name"
37
+ DATA_PRODUCT_NAME_MESSAGE = "Data product name"
38
+ DATASET_STATE_MESSAGE = "Dataset state"
39
+ DATASET_SHORT_NAME_MESSAGE = "Dataset short name"
40
+ VARIABLES_ADDITIONAL_MESSAGE = (
41
+ "Dataset has additional variables than defined in metadata"
42
+ )
43
+ VARIABLE_RENAME_MESSAGE = "Variables have been renamed in the dataset"
44
+ VARIABLE_ORDER_MESSAGE = "The order of variables in the dataset has changed"
45
+ VARIABLE_DATATYPES_MESSAGE = "Variable datatypes differ"
46
+ VARIABLES_FEWER_MESSAGE = "Dataset has fewer variables than defined in metadata"
47
+
48
+
49
+ class InconsistentDatasetsWarning(UserWarning):
50
+ """Existing and new datasets differ significantly from one another."""
51
+
52
+
53
+ class InconsistentDatasetsError(ValueError):
54
+ """Existing and new datasets differ significantly from one another."""
55
+
56
+
57
+ @dataclass
58
+ class DatasetConsistencyStatus:
59
+ """Store the status for different aspects of dataset consistency.
60
+
61
+ Attributes:
62
+ message: Communicates to the user what aspect is inconsistent.
63
+ success: False if inconsistency is detected.
64
+ variables: Optionally communicate which variables are affected.
65
+ """
66
+
67
+ message: str
68
+ success: bool
69
+ variables: Iterable[str] = field(default_factory=list)
70
+
71
+ def __str__(self) -> str:
72
+ """Format the user message."""
73
+ message = self.message
74
+ if self.variables:
75
+ message += f"\n\tVariables: {self.variables}"
76
+ return message
77
+
78
+
79
+ def check_dataset_consistency(
80
+ new_dataset_path: Path | CloudPath,
81
+ existing_dataset_path: Path,
82
+ ) -> list[DatasetConsistencyStatus]:
83
+ """Run consistency tests.
84
+
85
+ Args:
86
+ new_dataset_path: Path to the dataset to be documented.
87
+ existing_dataset_path: Path stored in the existing metadata.
88
+
89
+ Returns:
90
+ List of consistency check results.
91
+ """
92
+ new_dataset_path_info = DaplaDatasetPathInfo(new_dataset_path)
93
+ existing_dataset_path_info = DaplaDatasetPathInfo(existing_dataset_path)
94
+ return [
95
+ DatasetConsistencyStatus(
96
+ message=BUCKET_NAME_MESSAGE,
97
+ success=(
98
+ new_dataset_path_info.bucket_name
99
+ == existing_dataset_path_info.bucket_name
100
+ ),
101
+ ),
102
+ DatasetConsistencyStatus(
103
+ message=DATA_PRODUCT_NAME_MESSAGE,
104
+ success=(
105
+ new_dataset_path_info.statistic_short_name
106
+ == existing_dataset_path_info.statistic_short_name
107
+ ),
108
+ ),
109
+ DatasetConsistencyStatus(
110
+ message=DATASET_STATE_MESSAGE,
111
+ success=(
112
+ new_dataset_path_info.dataset_state
113
+ == existing_dataset_path_info.dataset_state
114
+ ),
115
+ ),
116
+ DatasetConsistencyStatus(
117
+ message=DATASET_SHORT_NAME_MESSAGE,
118
+ success=(
119
+ new_dataset_path_info.dataset_short_name
120
+ == existing_dataset_path_info.dataset_short_name
121
+ ),
122
+ ),
123
+ ]
124
+
125
+
126
+ def check_variables_consistency(
127
+ extracted_variables: VariableListType,
128
+ existing_variables: VariableListType,
129
+ ) -> list[DatasetConsistencyStatus]:
130
+ """Check for consistency in variables structure.
131
+
132
+ Compares the existing metadata and that extracted from the new dataset and provides
133
+ highly detailed feedback on what is different between them.
134
+
135
+ We don't return all the results because that could create conflicting messages and false positives.
136
+
137
+ Args:
138
+ extracted_variables (VariableListType): Variables extracted from the new dataset.
139
+ existing_variables (VariableListType): Variables already documented in existing metadata
140
+
141
+ Returns:
142
+ list[DatasetConsistencyStatus]: The list of checks and whether they were successful.
143
+ """
144
+ extracted_names_set = {v.short_name or "" for v in extracted_variables}
145
+ existing_names_set = {v.short_name or "" for v in existing_variables}
146
+ same_length = len(extracted_variables) == len(existing_variables)
147
+ more_extracted_variables = extracted_names_set.difference(existing_names_set)
148
+ fewer_extracted_variables = existing_names_set.difference(extracted_names_set)
149
+ results = []
150
+ if same_length:
151
+ if more_extracted_variables:
152
+ results.append(
153
+ DatasetConsistencyStatus(
154
+ message=VARIABLE_RENAME_MESSAGE,
155
+ variables=more_extracted_variables,
156
+ success=not bool(more_extracted_variables),
157
+ )
158
+ )
159
+ else:
160
+ results.append(
161
+ DatasetConsistencyStatus(
162
+ message=VARIABLE_ORDER_MESSAGE,
163
+ success=[v.short_name or "" for v in extracted_variables]
164
+ == [v.short_name or "" for v in existing_variables],
165
+ )
166
+ )
167
+ results.append(
168
+ DatasetConsistencyStatus(
169
+ message=VARIABLE_DATATYPES_MESSAGE,
170
+ success=[v.data_type for v in extracted_variables]
171
+ == [v.data_type for v in existing_variables],
172
+ )
173
+ )
174
+ else:
175
+ results.extend(
176
+ [
177
+ DatasetConsistencyStatus(
178
+ message=VARIABLES_ADDITIONAL_MESSAGE,
179
+ variables=more_extracted_variables,
180
+ success=not bool(more_extracted_variables),
181
+ ),
182
+ DatasetConsistencyStatus(
183
+ message=VARIABLES_FEWER_MESSAGE,
184
+ variables=fewer_extracted_variables,
185
+ success=not bool(fewer_extracted_variables),
186
+ ),
187
+ ]
188
+ )
189
+ return results
190
+
191
+
192
+ def check_ready_to_merge(
193
+ results: list[DatasetConsistencyStatus], *, errors_as_warnings: bool
194
+ ) -> None:
195
+ """Check if the datasets are consistent enough to make a successful merge of metadata.
196
+
197
+ Args:
198
+ results: List of consistency check results.
199
+ errors_as_warnings: True if failing checks should be raised as warnings, not errors.
200
+
201
+ Raises:
202
+ InconsistentDatasetsError: If inconsistencies are found and `errors_as_warnings == False`
203
+ """
204
+ if failures := [result for result in results if not result.success]:
205
+ messages_list = "\n - ".join(str(f) for f in failures)
206
+ msg = f"{INCONSISTENCIES_MESSAGE}\n - {messages_list}"
207
+ if errors_as_warnings:
208
+ warnings.warn(
209
+ message=msg,
210
+ category=InconsistentDatasetsWarning,
211
+ stacklevel=2,
212
+ )
213
+ else:
214
+ raise InconsistentDatasetsError(
215
+ msg,
216
+ )
217
+
218
+
219
+ def override_dataset_fields(
220
+ merged_metadata: all_optional_model.DatadocMetadata,
221
+ existing_metadata: all_optional_model.DatadocMetadata
222
+ | required_model.DatadocMetadata,
223
+ ) -> None:
224
+ """Overrides specific fields in the dataset of `merged_metadata` with values from the dataset of `existing_metadata`.
225
+
226
+ This function iterates over a predefined list of fields, `DATASET_FIELDS_FROM_EXISTING_METADATA`,
227
+ and sets the corresponding fields in the `merged_metadata.dataset` object to the values
228
+ from the `existing_metadata.dataset` object.
229
+
230
+ Args:
231
+ merged_metadata: An instance of `DatadocMetadata` containing the dataset to be updated.
232
+ existing_metadata: An instance of `DatadocMetadata` containing the dataset whose values are used to update `merged_metadata.dataset`.
233
+
234
+ Returns:
235
+ `None`.
236
+ """
237
+ if merged_metadata.dataset and existing_metadata.dataset:
238
+ # Override the fields as defined
239
+ for field in DATASET_FIELDS_FROM_EXISTING_METADATA:
240
+ setattr(
241
+ merged_metadata.dataset,
242
+ field,
243
+ getattr(existing_metadata.dataset, field),
244
+ )
245
+
246
+
247
+ def merge_variables(
248
+ existing_metadata: OptionalDatadocMetadataType,
249
+ extracted_metadata: all_optional_model.DatadocMetadata,
250
+ merged_metadata: all_optional_model.DatadocMetadata,
251
+ ) -> all_optional_model.DatadocMetadata:
252
+ """Merges variables from the extracted metadata into the existing metadata and updates the merged metadata.
253
+
254
+ This function compares the variables from `extracted_metadata` with those in `existing_metadata`.
255
+ For each variable in `extracted_metadata`, it checks if a variable with the same `short_name` exists
256
+ in `existing_metadata`. If a match is found, it updates the existing variable with information from
257
+ `extracted_metadata`. If no match is found, the variable from `extracted_metadata` is directly added to `merged_metadata`.
258
+
259
+ Args:
260
+ existing_metadata: The metadata object containing the current state of variables.
261
+ extracted_metadata: The metadata object containing new or updated variables to merge.
262
+ merged_metadata: The metadata object that will contain the result of the merge.
263
+
264
+ Returns:
265
+ all_optional_model.DatadocMetadata: The `merged_metadata` object containing variables from both `existing_metadata`
266
+ and `extracted_metadata`.
267
+ """
268
+ if (
269
+ existing_metadata is not None
270
+ and existing_metadata.variables is not None
271
+ and extracted_metadata is not None
272
+ and extracted_metadata.variables is not None
273
+ and merged_metadata.variables is not None
274
+ ):
275
+ for extracted in extracted_metadata.variables:
276
+ existing = next(
277
+ (
278
+ existing
279
+ for existing in existing_metadata.variables
280
+ if existing.short_name == extracted.short_name
281
+ ),
282
+ None,
283
+ )
284
+ if existing:
285
+ existing.id = (
286
+ None # Set to None so that it will be assigned a fresh ID later
287
+ )
288
+ existing.contains_data_from = (
289
+ extracted.contains_data_from or existing.contains_data_from
290
+ )
291
+ existing.contains_data_until = (
292
+ extracted.contains_data_until or existing.contains_data_until
293
+ )
294
+ merged_metadata.variables.append(
295
+ cast("datadoc_model.all_optional.model.Variable", existing)
296
+ )
297
+ else:
298
+ # If there is no existing metadata for this variable, we just use what we have extracted
299
+ merged_metadata.variables.append(extracted)
300
+ return merged_metadata
301
+
302
+
303
+ def merge_metadata(
304
+ extracted_metadata: all_optional_model.DatadocMetadata | None,
305
+ existing_metadata: OptionalDatadocMetadataType,
306
+ ) -> all_optional_model.DatadocMetadata:
307
+ if not existing_metadata:
308
+ logger.warning(
309
+ "No existing metadata found, no merge to perform. Continuing with extracted metadata.",
310
+ )
311
+ return extracted_metadata or all_optional_model.DatadocMetadata()
312
+
313
+ if not extracted_metadata:
314
+ return cast("all_optional_model.DatadocMetadata", existing_metadata)
315
+
316
+ # Use the extracted metadata as a base
317
+ merged_metadata = all_optional_model.DatadocMetadata(
318
+ dataset=copy.deepcopy(extracted_metadata.dataset),
319
+ variables=[],
320
+ )
321
+
322
+ override_dataset_fields(
323
+ merged_metadata=merged_metadata,
324
+ existing_metadata=existing_metadata,
325
+ )
326
+
327
+ # Merge variables.
328
+ # For each extracted variable, copy existing metadata into the merged metadata
329
+ return merge_variables(
330
+ existing_metadata=existing_metadata,
331
+ extracted_metadata=extracted_metadata,
332
+ merged_metadata=merged_metadata,
333
+ )
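The new (private) _merge module gathers the consistency checks and the merge step in one place. A minimal sketch of how the pieces fit together, with illustrative dataset paths and placeholder metadata objects; in normal use Datadoc drives this flow internally:

    from pathlib import Path

    import datadoc_model.all_optional.model as all_optional_model

    from dapla_metadata.datasets._merge import check_dataset_consistency
    from dapla_metadata.datasets._merge import check_ready_to_merge
    from dapla_metadata.datasets._merge import check_variables_consistency
    from dapla_metadata.datasets._merge import merge_metadata

    # Placeholder metadata; normally extracted from the dataset and read from an
    # existing metadata document respectively.
    extracted_metadata = all_optional_model.DatadocMetadata(variables=[])
    existing_metadata = all_optional_model.DatadocMetadata(variables=[])

    results = check_dataset_consistency(
        Path("bucket/produkt/klargjorte_data/person_data_p2024_v1.parquet"),
        Path("bucket/produkt/klargjorte_data/person_data_p2023_v1.parquet"),
    )
    results.extend(
        check_variables_consistency(
            extracted_metadata.variables or [],
            existing_metadata.variables or [],
        )
    )
    # Emits InconsistentDatasetsWarning for failing checks; with
    # errors_as_warnings=False it raises InconsistentDatasetsError instead.
    check_ready_to_merge(results, errors_as_warnings=True)
    merged = merge_metadata(extracted_metadata, existing_metadata)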
@@ -5,7 +5,6 @@ from __future__ import annotations
5
5
  import copy
6
6
  import json
7
7
  import logging
8
- import warnings
9
8
  from concurrent.futures import ThreadPoolExecutor
10
9
  from pathlib import Path
11
10
  from typing import TYPE_CHECKING
@@ -17,6 +16,11 @@ from datadoc_model.all_optional.model import DataSetStatus
17
16
 
18
17
  from dapla_metadata._shared import config
19
18
  from dapla_metadata.dapla import user_info
19
+ from dapla_metadata.datasets._merge import DatasetConsistencyStatus
20
+ from dapla_metadata.datasets._merge import check_dataset_consistency
21
+ from dapla_metadata.datasets._merge import check_ready_to_merge
22
+ from dapla_metadata.datasets._merge import check_variables_consistency
23
+ from dapla_metadata.datasets._merge import merge_metadata
20
24
  from dapla_metadata.datasets.compatibility import is_metadata_in_container_structure
21
25
  from dapla_metadata.datasets.compatibility import upgrade_metadata
22
26
  from dapla_metadata.datasets.dapla_dataset_path_info import DaplaDatasetPathInfo
@@ -26,7 +30,6 @@ from dapla_metadata.datasets.statistic_subject_mapping import StatisticSubjectMa
26
30
  from dapla_metadata.datasets.utility.constants import (
27
31
  DEFAULT_SPATIAL_COVERAGE_DESCRIPTION,
28
32
  )
29
- from dapla_metadata.datasets.utility.constants import INCONSISTENCIES_MESSAGE
30
33
  from dapla_metadata.datasets.utility.constants import METADATA_DOCUMENT_FILE_SUFFIX
31
34
  from dapla_metadata.datasets.utility.constants import NUM_OBLIGATORY_DATASET_FIELDS
32
35
  from dapla_metadata.datasets.utility.constants import NUM_OBLIGATORY_VARIABLES_FIELDS
@@ -34,7 +37,6 @@ from dapla_metadata.datasets.utility.utils import OptionalDatadocMetadataType
34
37
  from dapla_metadata.datasets.utility.utils import calculate_percentage
35
38
  from dapla_metadata.datasets.utility.utils import derive_assessment_from_state
36
39
  from dapla_metadata.datasets.utility.utils import get_timestamp_now
37
- from dapla_metadata.datasets.utility.utils import merge_variables
38
40
  from dapla_metadata.datasets.utility.utils import normalize_path
39
41
  from dapla_metadata.datasets.utility.utils import (
40
42
  num_obligatory_dataset_fields_completed,
@@ -42,9 +44,9 @@ from dapla_metadata.datasets.utility.utils import (
42
44
  from dapla_metadata.datasets.utility.utils import (
43
45
  num_obligatory_variables_fields_completed,
44
46
  )
45
- from dapla_metadata.datasets.utility.utils import override_dataset_fields
46
47
  from dapla_metadata.datasets.utility.utils import set_dataset_owner
47
48
  from dapla_metadata.datasets.utility.utils import set_default_values_dataset
49
+ from dapla_metadata.datasets.utility.utils import set_default_values_pseudonymization
48
50
  from dapla_metadata.datasets.utility.utils import set_default_values_variables
49
51
 
50
52
  if TYPE_CHECKING:
@@ -53,18 +55,9 @@ if TYPE_CHECKING:
53
55
 
54
56
  from cloudpathlib import CloudPath
55
57
 
56
-
57
58
  logger = logging.getLogger(__name__)
58
59
 
59
60
 
60
- class InconsistentDatasetsWarning(UserWarning):
61
- """Existing and new datasets differ significantly from one another."""
62
-
63
-
64
- class InconsistentDatasetsError(ValueError):
65
- """Existing and new datasets differ significantly from one another."""
66
-
67
-
68
61
  class Datadoc:
69
62
  """Handle reading, updating and writing of metadata.
70
63
 
@@ -118,7 +111,7 @@ class Datadoc:
118
111
  self.variables: list = []
119
112
  self.variables_lookup: dict[str, all_optional_model.Variable] = {}
120
113
  self.explicitly_defined_metadata_document = False
121
- self.dataset_consistency_status: list = []
114
+ self.dataset_consistency_status: list[DatasetConsistencyStatus] = []
122
115
  if metadata_document_path:
123
116
  self.metadata_document = normalize_path(metadata_document_path)
124
117
  self.explicitly_defined_metadata_document = True
@@ -169,20 +162,22 @@ class Datadoc:
169
162
  ):
170
163
  extracted_metadata = self._extract_metadata_from_dataset(self.dataset_path)
171
164
 
172
- if extracted_metadata is not None:
173
- existing_file_path = self._get_existing_file_path(extracted_metadata)
174
- if (
175
- self.dataset_path
176
- and existing_file_path is not None
177
- and extracted_metadata is not None
178
- and existing_metadata is not None
179
- ):
180
- self.dataset_consistency_status = self._check_dataset_consistency(
181
- self.dataset_path,
182
- Path(existing_file_path),
183
- extracted_metadata,
184
- existing_metadata,
165
+ if (
166
+ self.dataset_path
167
+ and self.metadata_document
168
+ and extracted_metadata
169
+ and existing_metadata
170
+ ):
171
+ self.dataset_consistency_status = check_dataset_consistency(
172
+ self.dataset_path,
173
+ Path(self.metadata_document),
174
+ )
175
+ self.dataset_consistency_status.extend(
176
+ check_variables_consistency(
177
+ extracted_metadata.variables or [],
178
+ existing_metadata.variables or [],
185
179
  )
180
+ )
186
181
 
187
182
  if (
188
183
  self.dataset_path
@@ -192,11 +187,11 @@ class Datadoc:
192
187
  and extracted_metadata is not None
193
188
  and existing_metadata is not None
194
189
  ):
195
- self._check_ready_to_merge(
190
+ check_ready_to_merge(
196
191
  self.dataset_consistency_status,
197
192
  errors_as_warnings=self.errors_as_warnings,
198
193
  )
199
- merged_metadata = self._merge_metadata(
194
+ merged_metadata = merge_metadata(
200
195
  extracted_metadata,
201
196
  existing_metadata,
202
197
  )
@@ -214,19 +209,6 @@ class Datadoc:
214
209
  set_dataset_owner(self.dataset)
215
210
  self._create_variables_lookup()
216
211
 
217
- def _get_existing_file_path(
218
- self,
219
- extracted_metadata: all_optional_model.DatadocMetadata | None,
220
- ) -> str:
221
- if (
222
- extracted_metadata is not None
223
- and extracted_metadata.dataset is not None
224
- and extracted_metadata.dataset.file_path is not None
225
- ):
226
- return extracted_metadata.dataset.file_path
227
- msg = "Could not access existing dataset file path"
228
- raise ValueError(msg)
229
-
230
212
  def _set_metadata(
231
213
  self,
232
214
  merged_metadata: OptionalDatadocMetadataType,
@@ -244,134 +226,6 @@ class Datadoc:
244
226
  v.short_name: v for v in self.variables if v.short_name
245
227
  }
246
228
 
247
- @staticmethod
248
- def _check_dataset_consistency(
249
- new_dataset_path: Path | CloudPath,
250
- existing_dataset_path: Path,
251
- extracted_metadata: all_optional_model.DatadocMetadata,
252
- existing_metadata: OptionalDatadocMetadataType,
253
- ) -> list[dict[str, object]]:
254
- """Run consistency tests.
255
-
256
- Args:
257
- new_dataset_path: Path to the dataset to be documented.
258
- existing_dataset_path: Path stored in the existing metadata.
259
- extracted_metadata: Metadata extracted from a physical dataset.
260
- existing_metadata: Metadata from a previously created metadata document.
261
-
262
- Returns:
263
- List if dict with property name and boolean success flag
264
- """
265
- new_dataset_path_info = DaplaDatasetPathInfo(new_dataset_path)
266
- existing_dataset_path_info = DaplaDatasetPathInfo(existing_dataset_path)
267
- return [
268
- {
269
- "name": "Bucket name",
270
- "success": (
271
- new_dataset_path_info.bucket_name
272
- == existing_dataset_path_info.bucket_name
273
- ),
274
- },
275
- {
276
- "name": "Data product name",
277
- "success": (
278
- new_dataset_path_info.statistic_short_name
279
- == existing_dataset_path_info.statistic_short_name
280
- ),
281
- },
282
- {
283
- "name": "Dataset state",
284
- "success": (
285
- new_dataset_path_info.dataset_state
286
- == existing_dataset_path_info.dataset_state
287
- ),
288
- },
289
- {
290
- "name": "Dataset short name",
291
- "success": (
292
- new_dataset_path_info.dataset_short_name
293
- == existing_dataset_path_info.dataset_short_name
294
- ),
295
- },
296
- {
297
- "name": "Variable names",
298
- "success": (
299
- existing_metadata is not None
300
- and {v.short_name for v in extracted_metadata.variables or []}
301
- == {v.short_name for v in existing_metadata.variables or []}
302
- ),
303
- },
304
- {
305
- "name": "Variable datatypes",
306
- "success": (
307
- existing_metadata is not None
308
- and [v.data_type for v in extracted_metadata.variables or []]
309
- == [v.data_type for v in existing_metadata.variables or []]
310
- ),
311
- },
312
- ]
313
-
314
- @staticmethod
315
- def _check_ready_to_merge(
316
- results: list[dict[str, object]], *, errors_as_warnings: bool
317
- ) -> None:
318
- """Check if the datasets are consistent enough to make a successful merge of metadata.
319
-
320
- Args:
321
- results: List if dict with property name and boolean success flag
322
- errors_as_warnings: True if failing checks should be raised as warnings, not errors.
323
-
324
- Raises:
325
- InconsistentDatasetsError: If inconsistencies are found and `errors_as_warnings == False`
326
- """
327
- if failures := [result for result in results if not result["success"]]:
328
- msg = f"{INCONSISTENCIES_MESSAGE} {', '.join(str(f['name']) for f in failures)}"
329
- if errors_as_warnings:
330
- warnings.warn(
331
- message=msg,
332
- category=InconsistentDatasetsWarning,
333
- stacklevel=2,
334
- )
335
- else:
336
- raise InconsistentDatasetsError(
337
- msg,
338
- )
339
-
340
- @staticmethod
341
- def _merge_metadata(
342
- extracted_metadata: all_optional_model.DatadocMetadata | None,
343
- existing_metadata: OptionalDatadocMetadataType,
344
- ) -> all_optional_model.DatadocMetadata:
345
- if not existing_metadata:
346
- logger.warning(
347
- "No existing metadata found, no merge to perform. Continuing with extracted metadata.",
348
- )
349
- return extracted_metadata or all_optional_model.DatadocMetadata()
350
-
351
- if not extracted_metadata:
352
- return cast("all_optional_model.DatadocMetadata", existing_metadata)
353
-
354
- # Use the extracted metadata as a base
355
- merged_metadata = all_optional_model.DatadocMetadata(
356
- dataset=copy.deepcopy(extracted_metadata.dataset),
357
- variables=[],
358
- )
359
-
360
- override_dataset_fields(
361
- merged_metadata=merged_metadata,
362
- existing_metadata=cast(
363
- "all_optional_model.DatadocMetadata", existing_metadata
364
- ),
365
- )
366
-
367
- # Merge variables.
368
- # For each extracted variable, copy existing metadata into the merged metadata
369
- return merge_variables(
370
- existing_metadata=existing_metadata,
371
- extracted_metadata=extracted_metadata,
372
- merged_metadata=merged_metadata,
373
- )
374
-
375
229
  def _extract_metadata_from_existing_document(
376
230
  self,
377
231
  document: pathlib.Path | CloudPath,
@@ -591,9 +445,12 @@ class Datadoc:
591
445
  variable_short_name: str,
592
446
  pseudonymization: all_optional_model.Pseudonymization | None = None,
593
447
  ) -> None:
594
- """Adds a new pseudo variable to the list of pseudonymized variables also sets is_personal_data to true.
448
+ """Adds a new pseudo variable to the list of pseudonymized variables.
595
449
 
596
- If there is no pseudonymization supplied an empty Pseudonymization structure will be added to the model.
450
+ If `pseudonymization` is not supplied, an empty Pseudonymization structure
451
+ will be created and assigned to the variable.
452
+ If an encryption algorithm is recognized (one of the standard Dapla algorithms), default values are filled
453
+ for any missing fields.
597
454
 
598
455
  Args:
599
456
  variable_short_name: The short name for the variable that one wants to update the pseudo for.
@@ -601,15 +458,15 @@ class Datadoc:
601
458
 
602
459
  """
603
460
  variable = self.variables_lookup[variable_short_name]
604
- variable.pseudonymization = (
605
- pseudonymization or all_optional_model.Pseudonymization()
606
- )
461
+ if pseudonymization:
462
+ set_default_values_pseudonymization(variable, pseudonymization)
463
+ else:
464
+ variable.pseudonymization = all_optional_model.Pseudonymization()
607
465
 
608
466
  def remove_pseudonymization(self, variable_short_name: str) -> None:
609
467
  """Removes a pseudo variable by using the shortname.
610
468
 
611
469
  Updates the pseudo variable lookup by creating a new one.
612
- Sets is_personal_data to non pseudonymized encrypted personal data.
613
470
 
614
471
  Args:
615
472
  variable_short_name: The short name for the variable that one wants to remove the pseudo for.
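The pseudonymization helpers on Datadoc now fill in Dapla defaults when the encryption algorithm is recognised. A hedged sketch of the call pattern, assuming a Datadoc instance built for a dataset that contains a variable with short name "fnr"; the path, short name and constructor keyword are illustrative:

    import datadoc_model.all_optional.model as all_optional_model

    from dapla_metadata.datasets import Datadoc

    meta = Datadoc(
        dataset_path="gs://bucket/produkt/klargjorte_data/person_data_p2024_v1.parquet",
    )

    # Without an argument an empty Pseudonymization structure is attached.
    meta.add_pseudonymization("fnr")
    meta.remove_pseudonymization("fnr")

    # With a recognised algorithm, missing fields such as encryption_key_reference
    # and encryption_algorithm_parameters are populated with the standard defaults.
    meta.add_pseudonymization(
        "fnr",
        all_optional_model.Pseudonymization(encryption_algorithm="TINK-FPE"),
    )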
@@ -9,7 +9,7 @@ DATE_VALIDATION_MESSAGE = f"{VALIDATION_ERROR}contains_data_from must be the sam
9
9
 
10
10
  OBLIGATORY_METADATA_WARNING = "Obligatory metadata is missing: "
11
11
 
12
- INCONSISTENCIES_MESSAGE = "Inconsistencies found between extracted and existing metadata. Inconsistencies are:"
12
+ INCONSISTENCIES_MESSAGE = "Inconsistencies found between extracted and existing metadata! This usually means that the new dataset has a different structure and that the version number should be incremented.\nDetails:"
13
13
 
14
14
  OBLIGATORY_DATASET_METADATA_IDENTIFIERS: list = [
15
15
  "assessment",
@@ -17,12 +17,9 @@ OBLIGATORY_DATASET_METADATA_IDENTIFIERS: list = [
17
17
  "dataset_status",
18
18
  "name",
19
19
  "description",
20
- "data_source",
21
20
  "population_description",
22
21
  "version",
23
22
  "version_description",
24
- "unit_type",
25
- "temporality_type",
26
23
  "subject_field",
27
24
  "spatial_coverage_description",
28
25
  "owner",
@@ -44,8 +41,18 @@ OBLIGATORY_VARIABLES_METADATA_IDENTIFIERS = [
44
41
  "data_type",
45
42
  "variable_role",
46
43
  "is_personal_data",
44
+ "unit_type",
45
+ "population_description",
46
+ "data_source",
47
+ "temporality_type",
48
+ ]
49
+
50
+ OBLIGATORY_VARIABLES_PESUODONYMIZATION_IDENTIFIERS = [
51
+ "encryption_algorithm",
52
+ "encryption_key_refrence",
47
53
  ]
48
54
 
55
+
49
56
  OBLIGATORY_VARIABLES_METADATA_IDENTIFIERS_MULTILANGUAGE = [
50
57
  "name",
51
58
  ]
@@ -90,3 +97,11 @@ METADATA_DOCUMENT_FILE_SUFFIX = "__DOC.json"
90
97
  DATADOC_STATISTICAL_SUBJECT_SOURCE_URL = (
91
98
  "https://www.ssb.no/xp/_/service/mimir/subjectStructurStatistics"
92
99
  )
100
+
101
+ PAPIS_STABLE_IDENTIFIER_TYPE = "FREG_SNR"
102
+ PAPIS_ENCRYPTION_KEY_REFERENCE = "papis-common-key-1"
103
+ DAEAD_ENCRYPTION_KEY_REFERENCE = "ssb-common-key-1"
104
+ ENCRYPTION_PARAMETER_SNAPSHOT_DATE = "snapshotDate"
105
+ ENCRYPTION_PARAMETER_KEY_ID = "keyId"
106
+ ENCRYPTION_PARAMETER_STRATEGY = "strategy"
107
+ ENCRYPTION_PARAMETER_STRATEGY_SKIP = "skip"
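These constants feed the new pseudonymization defaults in utils.py (shown further down). As a rough illustration, a PAPIS (TINK-FPE) pseudonymization with a FREG_SNR stable identifier ends up with values along these lines after defaults are applied; the date is simply the current date when the default is set:

    # Illustrative shape only; built by set_default_values_pseudonymization.
    encryption_key_reference = "papis-common-key-1"
    encryption_algorithm_parameters = [
        {"keyId": "papis-common-key-1"},
        {"strategy": "skip"},
        {"snapshotDate": "2025-01-01"},  # placeholder for get_current_date()
    ]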
@@ -14,3 +14,10 @@ class SupportedLanguages(str, Enum):
14
14
  NORSK_BOKMÅL = "nb" # noqa: PLC2401 the listed problems do not apply in this case
15
15
  NORSK_NYNORSK = "nn"
16
16
  ENGLISH = "en"
17
+
18
+
19
+ class EncryptionAlgorithm(str, Enum):
20
+ """Encryption algorithm values for pseudonymization algoprithms offered on Dapla."""
21
+
22
+ PAPIS_ENCRYPTION_ALGORITHM = "TINK-FPE"
23
+ DAEAD_ENCRYPTION_ALGORITHM = "TINK-DAEAD"
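The new enum is string-valued, so its members compare directly against the algorithm strings stored on a Pseudonymization instance; a tiny sanity-check sketch:

    from dapla_metadata.datasets.utility.enums import EncryptionAlgorithm

    assert EncryptionAlgorithm.PAPIS_ENCRYPTION_ALGORITHM.value == "TINK-FPE"
    assert EncryptionAlgorithm.DAEAD_ENCRYPTION_ALGORITHM.value == "TINK-DAEAD"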
@@ -4,25 +4,25 @@ import datetime # import is needed in xdoctest
4
4
  import logging
5
5
  import pathlib
6
6
  import uuid
7
+ from typing import Any
7
8
  from typing import TypeAlias
8
- from typing import cast
9
9
 
10
- import datadoc_model
11
10
  import datadoc_model.all_optional.model as all_optional_model
12
11
  import datadoc_model.required.model as required_model
13
12
  import google.auth
14
13
  from cloudpathlib import CloudPath
15
14
  from cloudpathlib import GSClient
16
15
  from cloudpathlib import GSPath
17
- from datadoc_model import model
18
16
  from datadoc_model.all_optional.model import Assessment
19
17
  from datadoc_model.all_optional.model import DataSetState
20
18
  from datadoc_model.all_optional.model import VariableRole
21
19
 
22
20
  from dapla_metadata.dapla import user_info
23
- from dapla_metadata.datasets.utility.constants import (
24
- DATASET_FIELDS_FROM_EXISTING_METADATA,
25
- )
21
+ from dapla_metadata.datasets.utility.constants import DAEAD_ENCRYPTION_KEY_REFERENCE
22
+ from dapla_metadata.datasets.utility.constants import ENCRYPTION_PARAMETER_KEY_ID
23
+ from dapla_metadata.datasets.utility.constants import ENCRYPTION_PARAMETER_SNAPSHOT_DATE
24
+ from dapla_metadata.datasets.utility.constants import ENCRYPTION_PARAMETER_STRATEGY
25
+ from dapla_metadata.datasets.utility.constants import ENCRYPTION_PARAMETER_STRATEGY_SKIP
26
26
  from dapla_metadata.datasets.utility.constants import NUM_OBLIGATORY_VARIABLES_FIELDS
27
27
  from dapla_metadata.datasets.utility.constants import (
28
28
  OBLIGATORY_DATASET_METADATA_IDENTIFIERS,
@@ -36,6 +36,9 @@ from dapla_metadata.datasets.utility.constants import (
36
36
  from dapla_metadata.datasets.utility.constants import (
37
37
  OBLIGATORY_VARIABLES_METADATA_IDENTIFIERS_MULTILANGUAGE,
38
38
  )
39
+ from dapla_metadata.datasets.utility.constants import PAPIS_ENCRYPTION_KEY_REFERENCE
40
+ from dapla_metadata.datasets.utility.constants import PAPIS_STABLE_IDENTIFIER_TYPE
41
+ from dapla_metadata.datasets.utility.enums import EncryptionAlgorithm
39
42
 
40
43
  logger = logging.getLogger(__name__)
41
44
 
@@ -43,9 +46,21 @@ DatadocMetadataType: TypeAlias = (
43
46
  all_optional_model.DatadocMetadata | required_model.DatadocMetadata
44
47
  )
45
48
  DatasetType: TypeAlias = all_optional_model.Dataset | required_model.Dataset
49
+ VariableType: TypeAlias = all_optional_model.Variable | required_model.Variable
50
+ PseudonymizationType: TypeAlias = (
51
+ all_optional_model.Pseudonymization | required_model.Pseudonymization
52
+ )
53
+ VariableListType: TypeAlias = (
54
+ list[all_optional_model.Variable] | list[required_model.Variable]
55
+ )
46
56
  OptionalDatadocMetadataType: TypeAlias = DatadocMetadataType | None
47
57
 
48
58
 
59
+ def get_current_date() -> str:
60
+ """Return a current date as str."""
61
+ return datetime.datetime.now(tz=datetime.timezone.utc).date().isoformat()
62
+
63
+
49
64
  def get_timestamp_now() -> datetime.datetime:
50
65
  """Return a timestamp for the current moment."""
51
66
  return datetime.datetime.now(tz=datetime.timezone.utc)
@@ -110,7 +125,7 @@ def set_default_values_variables(variables: list) -> None:
110
125
  variables: A list of variable objects to set default values on.
111
126
 
112
127
  Example:
113
- >>> variables = [model.Variable(short_name="pers",id=None, is_personal_data = None), model.Variable(short_name="fnr",id='9662875c-c245-41de-b667-12ad2091a1ee', is_personal_data=True)]
128
+ >>> variables = [all_optional_model.Variable(short_name="pers",id=None, is_personal_data = None), all_optional_model.Variable(short_name="fnr",id='9662875c-c245-41de-b667-12ad2091a1ee', is_personal_data=True)]
114
129
  >>> set_default_values_variables(variables)
115
130
  >>> isinstance(variables[0].id, uuid.UUID)
116
131
  True
@@ -139,7 +154,7 @@ def set_default_values_dataset(
139
154
  dataset: The dataset object to set default values on.
140
155
 
141
156
  Example:
142
- >>> dataset = model.Dataset(id=None)
157
+ >>> dataset = all_optional_model.Dataset(id=None)
143
158
  >>> set_default_values_dataset(dataset)
144
159
  >>> dataset.id is not None
145
160
  True
@@ -177,8 +192,8 @@ def set_variables_inherit_from_dataset(
177
192
  variables: A list of variable objects to update with dataset values.
178
193
 
179
194
  Example:
180
- >>> dataset = model.Dataset(short_name='person_data_v1', id='9662875c-c245-41de-b667-12ad2091a1ee', contains_data_from="2010-09-05", contains_data_until="2022-09-05")
181
- >>> variables = [model.Variable(short_name="pers", data_source=None, temporality_type=None, contains_data_from=None, contains_data_until=None)]
195
+ >>> dataset = all_optional_model.Dataset(short_name='person_data_v1', id='9662875c-c245-41de-b667-12ad2091a1ee', contains_data_from="2010-09-05", contains_data_until="2022-09-05")
196
+ >>> variables = [all_optional_model.Variable(short_name="pers", data_source=None, temporality_type=None, contains_data_from=None, contains_data_until=None)]
182
197
  >>> set_variables_inherit_from_dataset(dataset, variables)
183
198
 
184
199
  >>> variables[0].contains_data_from == dataset.contains_data_from
@@ -324,7 +339,9 @@ def num_obligatory_variables_fields_completed(variables: list) -> int:
324
339
  return num_completed
325
340
 
326
341
 
327
- def num_obligatory_variable_fields_completed(variable: model.Variable) -> int:
342
+ def num_obligatory_variable_fields_completed(
343
+ variable: all_optional_model.Variable,
344
+ ) -> int:
328
345
  """Count the number of obligatory fields completed for one variable.
329
346
 
330
347
  This function calculates the total number of obligatory fields that have
@@ -428,85 +445,64 @@ def running_in_notebook() -> bool:
428
445
  return False
429
446
 
430
447
 
431
- def override_dataset_fields(
432
- merged_metadata: all_optional_model.DatadocMetadata,
433
- existing_metadata: all_optional_model.DatadocMetadata
434
- | required_model.DatadocMetadata,
435
- ) -> None:
436
- """Overrides specific fields in the dataset of `merged_metadata` with values from the dataset of `existing_metadata`.
448
+ def _ensure_encryption_parameters(
449
+ existing: list[dict[str, Any]] | None,
450
+ required: dict[str, Any],
451
+ ) -> list[dict[str, Any]]:
452
+ """Ensure required key/value pairs exist in parameters list."""
453
+ result = list(existing or [])
437
454
 
438
- This function iterates over a predefined list of fields, `DATASET_FIELDS_FROM_EXISTING_METADATA`,
439
- and sets the corresponding fields in the `merged_metadata.dataset` object to the values
440
- from the `existing_metadata.dataset` object.
455
+ # Ensure each required key is present in at least one dict
456
+ for key, value in required.items():
457
+ if not any(key in d for d in result):
458
+ result.append({key: value})
441
459
 
442
- Args:
443
- merged_metadata: An instance of `DatadocMetadata` containing the dataset to be updated.
444
- existing_metadata: An instance of `DatadocMetadata` containing the dataset whose values are used to update `merged_metadata.dataset`.
460
+ return result
445
461
 
446
- Returns:
447
- `None`.
448
- """
449
- if merged_metadata.dataset and existing_metadata.dataset:
450
- # Override the fields as defined
451
- for field in DATASET_FIELDS_FROM_EXISTING_METADATA:
452
- setattr(
453
- merged_metadata.dataset,
454
- field,
455
- getattr(existing_metadata.dataset, field),
456
- )
457
-
458
-
459
- def merge_variables(
460
- existing_metadata: OptionalDatadocMetadataType,
461
- extracted_metadata: all_optional_model.DatadocMetadata,
462
- merged_metadata: all_optional_model.DatadocMetadata,
463
- ) -> all_optional_model.DatadocMetadata:
464
- """Merges variables from the extracted metadata into the existing metadata and updates the merged metadata.
465
462
 
466
- This function compares the variables from `extracted_metadata` with those in `existing_metadata`.
467
- For each variable in `extracted_metadata`, it checks if a variable with the same `short_name` exists
468
- in `existing_metadata`. If a match is found, it updates the existing variable with information from
469
- `extracted_metadata`. If no match is found, the variable from `extracted_metadata` is directly added to `merged_metadata`.
470
-
471
- Args:
472
- existing_metadata: The metadata object containing the current state of variables.
473
- extracted_metadata: The metadata object containing new or updated variables to merge.
474
- merged_metadata: The metadata object that will contain the result of the merge.
463
+ def set_default_values_pseudonymization(
464
+ variable: VariableType,
465
+ pseudonymization: PseudonymizationType | None,
466
+ ) -> None:
467
+ """Populate pseudonymization fields with defaults based on the encryption algorithm.
475
468
 
476
- Returns:
477
- all_optional_model.DatadocMetadata: The `merged_metadata` object containing variables from both `existing_metadata`
478
- and `extracted_metadata`.
469
+ Updates the encryption key reference and encryption parameters if they are not set,
470
+ handling both PAPIS and DAEAD algorithms. Leaves unknown algorithms unchanged.
479
471
  """
480
- if (
481
- existing_metadata is not None
482
- and existing_metadata.variables is not None
483
- and extracted_metadata is not None
484
- and extracted_metadata.variables is not None
485
- and merged_metadata.variables is not None
486
- ):
487
- for extracted in extracted_metadata.variables:
488
- existing = next(
489
- (
490
- existing
491
- for existing in existing_metadata.variables
492
- if existing.short_name == extracted.short_name
493
- ),
494
- None,
495
- )
496
- if existing:
497
- existing.id = (
498
- None # Set to None so that it will be set assigned a fresh ID later
472
+ if pseudonymization is None:
473
+ return
474
+ if variable.pseudonymization is None:
475
+ variable.pseudonymization = pseudonymization
476
+ match pseudonymization.encryption_algorithm:
477
+ case EncryptionAlgorithm.PAPIS_ENCRYPTION_ALGORITHM.value:
478
+ if not pseudonymization.encryption_key_reference:
479
+ pseudonymization.encryption_key_reference = (
480
+ PAPIS_ENCRYPTION_KEY_REFERENCE
499
481
  )
500
- existing.contains_data_from = (
501
- extracted.contains_data_from or existing.contains_data_from
482
+ base_params = {
483
+ ENCRYPTION_PARAMETER_KEY_ID: PAPIS_ENCRYPTION_KEY_REFERENCE,
484
+ ENCRYPTION_PARAMETER_STRATEGY: ENCRYPTION_PARAMETER_STRATEGY_SKIP,
485
+ }
486
+ if pseudonymization.stable_identifier_type == PAPIS_STABLE_IDENTIFIER_TYPE:
487
+ base_params[ENCRYPTION_PARAMETER_SNAPSHOT_DATE] = get_current_date()
488
+ pseudonymization.encryption_algorithm_parameters = (
489
+ _ensure_encryption_parameters(
490
+ pseudonymization.encryption_algorithm_parameters,
491
+ base_params,
502
492
  )
503
- existing.contains_data_until = (
504
- extracted.contains_data_until or existing.contains_data_until
493
+ )
494
+ case EncryptionAlgorithm.DAEAD_ENCRYPTION_ALGORITHM.value:
495
+ if not pseudonymization.encryption_key_reference:
496
+ pseudonymization.encryption_key_reference = (
497
+ DAEAD_ENCRYPTION_KEY_REFERENCE
505
498
  )
506
- merged_metadata.variables.append(
507
- cast("datadoc_model.all_optional.model.Variable", existing)
499
+ pseudonymization.encryption_algorithm_parameters = (
500
+ _ensure_encryption_parameters(
501
+ pseudonymization.encryption_algorithm_parameters,
502
+ {
503
+ ENCRYPTION_PARAMETER_KEY_ID: DAEAD_ENCRYPTION_KEY_REFERENCE,
504
+ },
508
505
  )
509
- else:
510
- # If there is no existing metadata for this variable, we just use what we have extracted
511
- merged_metadata.variables.append(extracted)
512
- return merged_metadata
506
+ )
507
+ case _:
508
+ pass
@@ -49,6 +49,10 @@ from dapla_metadata.variable_definitions.exceptions import vardef_file_error_han
49
49
 
50
50
  logger = logging.getLogger(__name__)
51
51
 
52
+ IDENTICAL_PATCH_ERROR_MESSAGE = (
53
+ "No changes detected in supported fields. Not creating identical patch."
54
+ )
55
+
52
56
 
53
57
  class VariableDefinition(CompleteResponse):
54
58
  """A Variable Definition.
@@ -274,11 +278,14 @@ class VariableDefinition(CompleteResponse):
274
278
  Returns:
275
279
  VariableDefinition: Variable Definition with all details.
276
280
  """
281
+ new_patch = _read_file_to_model(
282
+ file_path or self.get_file_path(),
283
+ Patch,
284
+ )
285
+ if new_patch == Patch.from_dict(self.to_dict()):
286
+ raise ValueError(IDENTICAL_PATCH_ERROR_MESSAGE)
277
287
  return self.create_patch(
278
- patch=_read_file_to_model(
279
- file_path or self.get_file_path(),
280
- Patch,
281
- ),
288
+ patch=new_patch,
282
289
  valid_from=valid_from,
283
290
  )
284
291
 
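The file-based patch flow on VariableDefinition now refuses to create a patch when the file content is identical to the current state. A hedged sketch of what calling code can expect; `vardef_variable` stands for an existing VariableDefinition instance, and the helper name is hypothetical since it is not visible in this hunk:

    from dapla_metadata.variable_definitions.variable_definition import (
        IDENTICAL_PATCH_ERROR_MESSAGE,
    )

    try:
        vardef_variable.create_patch_from_file()  # hypothetical method name
    except ValueError as err:
        assert str(err) == IDENTICAL_PATCH_ERROR_MESSAGE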
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: dapla-toolbelt-metadata
3
- Version: 0.9.1
3
+ Version: 0.9.3
4
4
  Summary: Dapla Toolbelt Metadata
5
5
  Project-URL: homepage, https://github.com/statisticsnorway/dapla-toolbelt-metadata
6
6
  Project-URL: repository, https://github.com/statisticsnorway/dapla-toolbelt-metadata
@@ -5,9 +5,10 @@ dapla_metadata/_shared/enums.py,sha256=WHkH1d8xw41gOly6au_izZB1_-6XTcKu5rhBWUImj
5
5
  dapla_metadata/_shared/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
6
  dapla_metadata/dapla/__init__.py,sha256=tkapF-YwmruPPrKvN3pEoCZqb7xvJx_ogBM8XyGMuJI,130
7
7
  dapla_metadata/dapla/user_info.py,sha256=bENez-ICt9ySR8orYebO68Q3_2LkIW9QTL58DTctmEQ,4833
8
- dapla_metadata/datasets/__init__.py,sha256=TvzskpdFC6hGcC9_55URT5jr5wNAPzXuISd2UjJWM_8,280
8
+ dapla_metadata/datasets/__init__.py,sha256=an-REJgi7N8-S1SCz-MYO_8as6fMe03WvhjRP_hWWkg,293
9
+ dapla_metadata/datasets/_merge.py,sha256=Tk5wQz6xZGr8veUAHZb42O8HARU8ObBJ_E4afvVWdlo,12993
9
10
  dapla_metadata/datasets/code_list.py,sha256=JtCE-5Q8grAKvkn0KKjzeGhO-96O7yGsastbuoakreg,9057
10
- dapla_metadata/datasets/core.py,sha256=jwnV6kqS7GpS_9kVWbFz7J0TE-TiSSiqq_gSV4sE628,25774
11
+ dapla_metadata/datasets/core.py,sha256=p-2OJsAEWCUqBlzn0YIYkK-pAgtvMROdoxXvCyjfWYs,20434
11
12
  dapla_metadata/datasets/dapla_dataset_path_info.py,sha256=WPeV_mwKk2B9sXd14SaP-kTb1bOQ_8W2KtrqOG7sJIY,26867
12
13
  dapla_metadata/datasets/dataset_parser.py,sha256=3dtRXNy1C8SfG8zTYWdY26nV4l-dG25IC_0J5t2bYwI,8285
13
14
  dapla_metadata/datasets/model_validation.py,sha256=pGT-jqaQQY4z7jz-7UQd0BQoTWDxDWPYAnDoRC2vd_c,6818
@@ -20,9 +21,9 @@ dapla_metadata/datasets/compatibility/model_backwards_compatibility.py,sha256=W5
20
21
  dapla_metadata/datasets/external_sources/__init__.py,sha256=qvIdXwqyEmXNUCB94ZtZXRzifdW4hiXASFFPtC70f6E,83
21
22
  dapla_metadata/datasets/external_sources/external_sources.py,sha256=9eIcOIUbaodNX1w9Tj2wl4U4wUmr5kF1R0i01fKUzGs,2974
22
23
  dapla_metadata/datasets/utility/__init__.py,sha256=pp6tUcgUbo8iq9OPtFKQrTbLuI3uY7NHptwWSTpasOU,33
23
- dapla_metadata/datasets/utility/constants.py,sha256=f9TfBN5aJbiKBQVpu8Whc0X-EMpXv43-Yu2L4KUDA4U,2353
24
- dapla_metadata/datasets/utility/enums.py,sha256=SpV4xlmP1YMaJPbmX03hqRLHUOhXIk5gquTeJ8G_5OE,432
25
- dapla_metadata/datasets/utility/utils.py,sha256=AjPWlg_8DJpqFQ8B2MaVJrB62EZ3dilvfRfc0DImjQI,18499
24
+ dapla_metadata/datasets/utility/constants.py,sha256=94nGISL96rHvAndjHyaQEaJXNBnPAiRJN1slUaB03gM,2933
25
+ dapla_metadata/datasets/utility/enums.py,sha256=i6dcxWya5k4LjLdGGIM_H37rRndizug3peaAgoE5UdM,652
26
+ dapla_metadata/datasets/utility/utils.py,sha256=85Ms6jEcUuQUm-RRosscDVpvA5W4TOqiOZo2LAnXjFA,18301
26
27
  dapla_metadata/standards/__init__.py,sha256=n8jnMrudLuScSdfQ4UMJorc-Ptg3Y1-ilT8zAaQnM70,179
27
28
  dapla_metadata/standards/name_validator.py,sha256=6-DQE_EKVd6UjL--EXpFcZDQtusVbSFaWaUY-CfOV2c,9184
28
29
  dapla_metadata/standards/standard_validators.py,sha256=tcCiCI76wUVtMzXA2oCgdauZc0uGgUi11FKu-t7KGwQ,3767
@@ -33,7 +34,7 @@ dapla_metadata/variable_definitions/exceptions.py,sha256=ImB81bne-h45kX9lE5hIh80
33
34
  dapla_metadata/variable_definitions/vardef.py,sha256=WUpiKfvgFGPhMdjYSFSmdlXQKAolmRgW4-t-EocddQs,13934
34
35
  dapla_metadata/variable_definitions/vardok_id.py,sha256=8T23BUHyVQr5hovTVc2E4HVY7f7e_jdi3YL1qzMQgFw,1268
35
36
  dapla_metadata/variable_definitions/vardok_vardef_id_pair.py,sha256=8MDdd2-9L30MXkoQrk7NDcueaoxdeYie-TJhgoskTzk,1389
36
- dapla_metadata/variable_definitions/variable_definition.py,sha256=uSWvSuVDh5zmSXGUb7vitiNd0VThU5DzFV3Rd6gumYE,14620
37
+ dapla_metadata/variable_definitions/variable_definition.py,sha256=tsTo_BObHgppSNf6lGbt_Vh88PypPF4pwA-vWDgFy9A,14869
37
38
  dapla_metadata/variable_definitions/_generated/.openapi-generator-ignore,sha256=x9lryVB5wtVEuKQ5GcZ94b10RgtkVXbtvWXOArO1XsM,169
38
39
  dapla_metadata/variable_definitions/_generated/README.md,sha256=Y4et1oAhZTCr7a-CZfLbIpyYnhKzpygNg-gj7qJ09Eg,7650
39
40
  dapla_metadata/variable_definitions/_generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -89,7 +90,7 @@ dapla_metadata/variable_definitions/_utils/constants.py,sha256=zr5FNVCEz6TM9PVEr
89
90
  dapla_metadata/variable_definitions/_utils/files.py,sha256=JbPgPNQ7iA38juMqGEdcg5OjZZUwCb6NQtPL0AEspD0,10933
90
91
  dapla_metadata/variable_definitions/_utils/template_files.py,sha256=7fcc7yEHOl5JUZ698kqj4IiikXPHBi3SrAVOk4wqQtw,3308
91
92
  dapla_metadata/variable_definitions/_utils/variable_definition_files.py,sha256=sGhcSpckR9NtYGNh2oVkiCd5SI3bbJEBhc1PA2uShs0,4701
92
- dapla_toolbelt_metadata-0.9.1.dist-info/METADATA,sha256=aExcayYhHGSrYtImpcv7e5vZFFvAaSuAtbpheyCOeN8,4723
93
- dapla_toolbelt_metadata-0.9.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
94
- dapla_toolbelt_metadata-0.9.1.dist-info/licenses/LICENSE,sha256=np3IfD5m0ZUofn_kVzDZqliozuiO6wrktw3LRPjyEiI,1073
95
- dapla_toolbelt_metadata-0.9.1.dist-info/RECORD,,
93
+ dapla_toolbelt_metadata-0.9.3.dist-info/METADATA,sha256=aCJzU5NSK7_yY9lu5R9KnO-cE6P6o_-gc_32CS6qeKU,4723
94
+ dapla_toolbelt_metadata-0.9.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
95
+ dapla_toolbelt_metadata-0.9.3.dist-info/licenses/LICENSE,sha256=np3IfD5m0ZUofn_kVzDZqliozuiO6wrktw3LRPjyEiI,1073
96
+ dapla_toolbelt_metadata-0.9.3.dist-info/RECORD,,