dapla-toolbelt-metadata 0.1.1__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dapla-toolbelt-metadata might be problematic.
- dapla_metadata/__init__.py +3 -0
- {dataset → dapla_metadata/datasets}/__init__.py +1 -3
- {dataset → dapla_metadata/datasets}/code_list.py +2 -2
- dapla_metadata/datasets/config.py +82 -0
- {dataset → dapla_metadata/datasets}/core.py +72 -91
- {dataset → dapla_metadata/datasets}/dataset_parser.py +1 -1
- {dataset → dapla_metadata/datasets}/model_validation.py +17 -11
- {dataset → dapla_metadata/datasets}/statistic_subject_mapping.py +9 -8
- {dataset → dapla_metadata/datasets}/user_info.py +3 -3
- {dataset → dapla_metadata/datasets}/utility/constants.py +4 -0
- {dataset → dapla_metadata/datasets}/utility/utils.py +92 -5
- {dapla_toolbelt_metadata-0.1.1.dist-info → dapla_toolbelt_metadata-0.2.0.dist-info}/METADATA +8 -19
- dapla_toolbelt_metadata-0.2.0.dist-info/RECORD +22 -0
- dapla_toolbelt_metadata-0.1.1.dist-info/RECORD +0 -21
- dataset/config.py +0 -151
- {dataset → dapla_metadata/datasets}/dapla_dataset_path_info.py +0 -0
- {dataset → dapla_metadata/datasets}/external_sources/__init__.py +0 -0
- {dataset → dapla_metadata/datasets}/external_sources/external_sources.py +0 -0
- {dataset → dapla_metadata/datasets}/model_backwards_compatibility.py +0 -0
- {dataset → dapla_metadata/datasets}/py.typed +0 -0
- {dataset → dapla_metadata/datasets}/utility/__init__.py +0 -0
- {dataset → dapla_metadata/datasets}/utility/enums.py +0 -0
- {dapla_toolbelt_metadata-0.1.1.dist-info → dapla_toolbelt_metadata-0.2.0.dist-info}/LICENSE +0 -0
- {dapla_toolbelt_metadata-0.1.1.dist-info → dapla_toolbelt_metadata-0.2.0.dist-info}/WHEEL +0 -0
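The headline change in this release is the rename of the top-level `dataset` package to `dapla_metadata.datasets`, visible in the path mappings above. A minimal before/after sketch of what this means for downstream imports (the `Datadoc` name is taken from the `__init__.py` hunk below; any other import would follow the same pattern):

```python
# In 0.1.1 the package shipped a top-level `dataset` module, so imports
# would have looked like this:
# from dataset.core import Datadoc

# In 0.2.0 the same module lives under the `dapla_metadata` namespace:
from dapla_metadata.datasets.core import Datadoc
```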
{dataset → dapla_metadata/datasets}/__init__.py
RENAMED

@@ -6,6 +6,4 @@ from .core import Datadoc
 from .dapla_dataset_path_info import DaplaDatasetPathInfo
 from .model_validation import ObligatoryDatasetWarning
 from .model_validation import ObligatoryVariableWarning
-from .utility
-from .utility.enums import DaplaService
-from .utility.enums import SupportedLanguages
+from .utility import enums
{dataset → dapla_metadata/datasets}/code_list.py
RENAMED

@@ -4,8 +4,8 @@ import logging
 from dataclasses import dataclass
 from typing import TYPE_CHECKING
 
-from
-from
+from dapla_metadata.datasets.external_sources.external_sources import GetExternalSource
+from dapla_metadata.datasets.utility.enums import SupportedLanguages
 
 if TYPE_CHECKING:
     from concurrent.futures import ThreadPoolExecutor
dapla_metadata/datasets/config.py
ADDED

@@ -0,0 +1,82 @@
+"""Configuration management for dataset package."""
+
+from __future__ import annotations
+
+import logging
+import os
+from pathlib import Path
+from pprint import pformat
+
+from dotenv import dotenv_values
+from dotenv import load_dotenv
+
+from dapla_metadata.datasets.utility.constants import (
+    DATADOC_STATISTICAL_SUBJECT_SOURCE_URL,
+)
+from dapla_metadata.datasets.utility.enums import DaplaRegion
+from dapla_metadata.datasets.utility.enums import DaplaService
+
+logging.basicConfig(level=logging.DEBUG, force=True)
+
+logger = logging.getLogger(__name__)
+
+DOT_ENV_FILE_PATH = Path(__file__).parent.joinpath(".env")
+
+JUPYTERHUB_USER = "JUPYTERHUB_USER"
+DAPLA_REGION = "DAPLA_REGION"
+DAPLA_SERVICE = "DAPLA_SERVICE"
+
+env_loaded = False
+
+
+def _load_dotenv_file() -> None:
+    global env_loaded  # noqa: PLW0603
+    if not env_loaded and DOT_ENV_FILE_PATH.exists():
+        load_dotenv(DOT_ENV_FILE_PATH)
+        env_loaded = True
+        logger.info(
+            "Loaded .env file with config keys: \n%s",
+            pformat(list(dotenv_values(DOT_ENV_FILE_PATH).keys())),
+        )
+
+
+def _get_config_item(item: str) -> str | None:
+    """Get a config item. Makes sure all access is logged."""
+    _load_dotenv_file()
+    value = os.getenv(item)
+    logger.debug("Config accessed. %s", item)
+    return value
+
+
+def get_jupyterhub_user() -> str | None:
+    """Get the JupyterHub user name."""
+    return _get_config_item(JUPYTERHUB_USER)
+
+
+def get_statistical_subject_source_url() -> str | None:
+    """Get the URL to the statistical subject source."""
+    return (
+        _get_config_item("DATADOC_STATISTICAL_SUBJECT_SOURCE_URL")
+        or DATADOC_STATISTICAL_SUBJECT_SOURCE_URL
+    )
+
+
+def get_dapla_region() -> DaplaRegion | None:
+    """Get the Dapla region we're running on."""
+    if region := _get_config_item(DAPLA_REGION):
+        return DaplaRegion(region)
+
+    return None
+
+
+def get_dapla_service() -> DaplaService | None:
+    """Get the Dapla service we're running on."""
+    if service := _get_config_item(DAPLA_SERVICE):
+        return DaplaService(service)
+
+    return None
+
+
+def get_oidc_token() -> str | None:
+    """Get the JWT token from the environment."""
+    return _get_config_item("OIDC_TOKEN")
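For orientation, a short sketch of how the new helpers in `dapla_metadata/datasets/config.py` might be called. The function names come straight from the hunk above; the returned values naturally depend on the runtime environment:

```python
from dapla_metadata.datasets import config

# Each call goes through _get_config_item, which loads the bundled .env
# file (once) and logs the access before reading the environment.
user = config.get_jupyterhub_user()    # reads JUPYTERHUB_USER
region = config.get_dapla_region()     # parses DAPLA_REGION into DaplaRegion, else None
subject_url = config.get_statistical_subject_source_url()  # env value or packaged default

print(user, region, subject_url)
```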
{dataset → dapla_metadata/datasets}/core.py
RENAMED

@@ -13,28 +13,37 @@ from typing import TYPE_CHECKING
 from datadoc_model import model
 from datadoc_model.model import DataSetStatus
 
-from
-from
-from
-from
-from
-
-
-from
-from
-from
-from
-
-
-from
-from
-from
-from
-from
-from
-from
-from
-from
+from dapla_metadata.datasets import config
+from dapla_metadata.datasets import user_info
+from dapla_metadata.datasets.dapla_dataset_path_info import DaplaDatasetPathInfo
+from dapla_metadata.datasets.dataset_parser import DatasetParser
+from dapla_metadata.datasets.model_backwards_compatibility import (
+    is_metadata_in_container_structure,
+)
+from dapla_metadata.datasets.model_backwards_compatibility import upgrade_metadata
+from dapla_metadata.datasets.model_validation import ValidateDatadocMetadata
+from dapla_metadata.datasets.statistic_subject_mapping import StatisticSubjectMapping
+from dapla_metadata.datasets.utility.constants import (
+    DEFAULT_SPATIAL_COVERAGE_DESCRIPTION,
+)
+from dapla_metadata.datasets.utility.constants import INCONSISTENCIES_MESSAGE
+from dapla_metadata.datasets.utility.constants import METADATA_DOCUMENT_FILE_SUFFIX
+from dapla_metadata.datasets.utility.constants import NUM_OBLIGATORY_DATASET_FIELDS
+from dapla_metadata.datasets.utility.constants import NUM_OBLIGATORY_VARIABLES_FIELDS
+from dapla_metadata.datasets.utility.utils import calculate_percentage
+from dapla_metadata.datasets.utility.utils import derive_assessment_from_state
+from dapla_metadata.datasets.utility.utils import get_timestamp_now
+from dapla_metadata.datasets.utility.utils import merge_variables
+from dapla_metadata.datasets.utility.utils import normalize_path
+from dapla_metadata.datasets.utility.utils import (
+    num_obligatory_dataset_fields_completed,
+)
+from dapla_metadata.datasets.utility.utils import (
+    num_obligatory_variables_fields_completed,
+)
+from dapla_metadata.datasets.utility.utils import override_dataset_fields
+from dapla_metadata.datasets.utility.utils import set_default_values_dataset
+from dapla_metadata.datasets.utility.utils import set_default_values_variables
 
 if TYPE_CHECKING:
     import pathlib
@@ -138,10 +147,11 @@ class Datadoc:
         """
         extracted_metadata: model.DatadocMetadata | None = None
         existing_metadata: model.DatadocMetadata | None = None
-        if self.metadata_document
+        if self.metadata_document and self.metadata_document.exists():
            existing_metadata = self._extract_metadata_from_existing_document(
                self.metadata_document,
            )
+
         if (
             self.dataset_path is not None
             and self.dataset == model.Dataset()
@@ -157,14 +167,7 @@ class Datadoc:
             and extracted_metadata is not None
             and existing_metadata is not None
         ):
-
-                extracted_metadata.dataset is not None
-                and extracted_metadata.dataset.file_path is not None
-            ):
-                existing_file_path = extracted_metadata.dataset.file_path
-            else:
-                msg = "Could not access existing dataset file path"
-                raise ValueError(msg)
+            existing_file_path = self._get_existing_file_path(extracted_metadata)
             self._check_ready_to_merge(
                 self.dataset_path,
                 Path(existing_file_path),
@@ -181,31 +184,39 @@ class Datadoc:
             self.metadata_document = self.build_metadata_document_path(
                 self.dataset_path,
             )
-
-
-
-
-
-
-
-
-
+            self._set_metadata(merged_metadata)
+        else:
+            self._set_metadata(existing_metadata or extracted_metadata)
+        set_default_values_variables(self.variables)
+        set_default_values_dataset(self.dataset)
+        self._create_variables_lookup()
+
+    def _get_existing_file_path(
+        self,
+        extracted_metadata: model.DatadocMetadata | None,
+    ) -> str:
+        if (
+            extracted_metadata is not None
+            and extracted_metadata.dataset is not None
+            and extracted_metadata.dataset.file_path is not None
         ):
-
-
-
-
-
-
+            return extracted_metadata.dataset.file_path
+        msg = "Could not access existing dataset file path"
+        raise ValueError(msg)
+
+    def _set_metadata(
+        self,
+        merged_metadata: model.DatadocMetadata | None,
+    ) -> None:
+        if not merged_metadata or not (
+            merged_metadata.dataset and merged_metadata.variables
         ):
-            self.dataset = extracted_metadata.dataset
-            self.variables = extracted_metadata.variables
-        else:
             msg = "Could not read metadata"
             raise ValueError(msg)
-
-
+        self.dataset = merged_metadata.dataset
+        self.variables = merged_metadata.variables
+
+    def _create_variables_lookup(self) -> None:
         self.variables_lookup = {
             v.short_name: v for v in self.variables if v.short_name
         }
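The tail of this hunk also splits the `variables_lookup` construction into its own `_create_variables_lookup` helper; the dict maps each variable's `short_name` to the variable object for constant-time access. A brief usage sketch (hypothetical: `meta` stands for an already-initialised `Datadoc` instance):

```python
# `meta` is assumed to be a Datadoc instance whose metadata has been read.
income = meta.variables_lookup.get("income")  # None if no variable has this short_name
if income is not None:
    print(income.short_name)
```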
@@ -300,55 +311,25 @@ class Datadoc:
             "No existing metadata found, no merge to perform. Continuing with extracted metadata.",
         )
         return extracted_metadata or model.DatadocMetadata()
+
         if not extracted_metadata:
             return existing_metadata
+
         # Use the extracted metadata as a base
         merged_metadata = model.DatadocMetadata(
             dataset=copy.deepcopy(extracted_metadata.dataset),
             variables=[],
         )
-
-
-            and existing_metadata.dataset is not None
-        ):
-            # Override the fields as defined
-            for field in DATASET_FIELDS_FROM_EXISTING_METADATA:
-                setattr(
-                    merged_metadata.dataset,
-                    field,
-                    getattr(existing_metadata.dataset, field),
-                )
+
+        override_dataset_fields(merged_metadata, extracted_metadata)
 
         # Merge variables.
         # For each extracted variable, copy existing metadata into the merged metadata
-
-            existing_metadata
-
-
-
-        ):
-            for extracted in extracted_metadata.variables:
-                existing = next(
-                    (
-                        existing
-                        for existing in existing_metadata.variables
-                        if existing.short_name == extracted.short_name
-                    ),
-                    None,
-                )
-                if existing:
-                    existing.id = None  # Set to None so that it will be set assigned a fresh ID later
-                    existing.contains_data_from = (
-                        extracted.contains_data_from or existing.contains_data_from
-                    )
-                    existing.contains_data_until = (
-                        extracted.contains_data_until or existing.contains_data_until
-                    )
-                    merged_metadata.variables.append(existing)
-                else:
-                    # If there is no existing metadata for this variable, we just use what we have extracted
-                    merged_metadata.variables.append(extracted)
-        return merged_metadata
+        return merge_variables(
+            existing_metadata,
+            extracted_metadata,
+            merged_metadata,
+        )
 
     def _extract_metadata_from_existing_document(
         self,
{dataset → dapla_metadata/datasets}/dataset_parser.py
RENAMED

@@ -19,7 +19,7 @@ from datadoc_model.model import LanguageStringTypeItem
 from datadoc_model.model import Variable
 from pyarrow import parquet as pq
 
-from
+from dapla_metadata.datasets.utility.enums import SupportedLanguages
 
 if TYPE_CHECKING:
     import pyarrow as pa
{dataset → dapla_metadata/datasets}/model_validation.py
RENAMED

@@ -11,17 +11,23 @@ from datadoc_model import model
 from pydantic import model_validator
 from typing_extensions import Self
 
-from
-from
-from
-from
-from
-from
-
-
-from
-from
-from
+from dapla_metadata.datasets.utility.constants import DATE_VALIDATION_MESSAGE
+from dapla_metadata.datasets.utility.constants import NUM_OBLIGATORY_DATASET_FIELDS
+from dapla_metadata.datasets.utility.constants import NUM_OBLIGATORY_VARIABLES_FIELDS
+from dapla_metadata.datasets.utility.constants import OBLIGATORY_METADATA_WARNING
+from dapla_metadata.datasets.utility.utils import get_missing_obligatory_dataset_fields
+from dapla_metadata.datasets.utility.utils import (
+    get_missing_obligatory_variables_fields,
+)
+from dapla_metadata.datasets.utility.utils import get_timestamp_now
+from dapla_metadata.datasets.utility.utils import incorrect_date_order
+from dapla_metadata.datasets.utility.utils import (
+    num_obligatory_dataset_fields_completed,
+)
+from dapla_metadata.datasets.utility.utils import (
+    num_obligatory_variables_fields_completed,
+)
+from dapla_metadata.datasets.utility.utils import set_variables_inherit_from_dataset
 
 if TYPE_CHECKING:
     from datetime import datetime
{dataset → dapla_metadata/datasets}/statistic_subject_mapping.py
RENAMED

@@ -9,8 +9,8 @@ import requests
 from bs4 import BeautifulSoup
 from bs4 import ResultSet
 
-from
-from
+from dapla_metadata.datasets.external_sources.external_sources import GetExternalSource
+from dapla_metadata.datasets.utility.enums import SupportedLanguages
 
 if TYPE_CHECKING:
     from concurrent.futures import ThreadPoolExecutor
@@ -116,17 +116,18 @@ class StatisticSubjectMapping(GetExternalSource):
 
         Returns a BeautifulSoup ResultSet.
         """
+        if not self.source_url:
+            logger.debug("No statistic subject url supplied")
+            return None
+
         try:
-
-            response = requests.get(url, timeout=30)
+            response = requests.get(str(self.source_url), timeout=30)
             response.encoding = "utf-8"
-            logger.debug("Got response %s from %s", response,
+            logger.debug("Got response %s from %s", response, self.source_url)
             soup = BeautifulSoup(response.text, features="xml")
             return soup.find_all("hovedemne")
         except requests.exceptions.RequestException:
-            logger.exception(
-                "Exception while fetching statistical structure ",
-            )
+            logger.exception("Exception while fetching statistical structure")
             return None
 
     def _parse_statistic_subject_structure_xml(
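This hunk adds an early-return guard so that a missing source URL no longer reaches `requests.get`. A simplified, self-contained reproduction of the pattern (the function name here is hypothetical; the real code is a method on `StatisticSubjectMapping`):

```python
import logging

import requests
from bs4 import BeautifulSoup

logger = logging.getLogger(__name__)


def fetch_primary_subjects(source_url: str | None):
    """Fetch <hovedemne> elements, or None if no URL is configured or the request fails."""
    if not source_url:
        # New in 0.2.0: bail out before attempting any network I/O.
        logger.debug("No statistic subject url supplied")
        return None
    try:
        response = requests.get(str(source_url), timeout=30)
        response.encoding = "utf-8"
        soup = BeautifulSoup(response.text, features="xml")
        return soup.find_all("hovedemne")
    except requests.exceptions.RequestException:
        logger.exception("Exception while fetching statistical structure")
        return None
```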
{dataset → dapla_metadata/datasets}/user_info.py
RENAMED

@@ -6,9 +6,9 @@ from typing import Protocol
 
 import jwt
 
-from
-from
-from
+from dapla_metadata.datasets import config
+from dapla_metadata.datasets.utility.enums import DaplaRegion
+from dapla_metadata.datasets.utility.enums import DaplaService
 
 logger = logging.getLogger(__name__)
 
{dataset → dapla_metadata/datasets}/utility/utils.py
RENAMED

@@ -14,13 +14,20 @@ from datadoc_model.model import Assessment
 from datadoc_model.model import DataSetState
 from datadoc_model.model import VariableRole
 
-from
-
-
+from dapla_metadata.datasets.utility.constants import (
+    DATASET_FIELDS_FROM_EXISTING_METADATA,
+)
+from dapla_metadata.datasets.utility.constants import NUM_OBLIGATORY_VARIABLES_FIELDS
+from dapla_metadata.datasets.utility.constants import (
+    OBLIGATORY_DATASET_METADATA_IDENTIFIERS,
+)
+from dapla_metadata.datasets.utility.constants import (
     OBLIGATORY_DATASET_METADATA_IDENTIFIERS_MULTILANGUAGE,
 )
-from
-
+from dapla_metadata.datasets.utility.constants import (
+    OBLIGATORY_VARIABLES_METADATA_IDENTIFIERS,
+)
+from dapla_metadata.datasets.utility.constants import (
     OBLIGATORY_VARIABLES_METADATA_IDENTIFIERS_MULTILANGUAGE,
 )
 
@@ -403,3 +410,83 @@ def running_in_notebook() -> bool:
     # interpreters and will throw a NameError. Therefore we're not running
     # in Jupyter.
     return False
+
+
+def override_dataset_fields(
+    merged_metadata: model.DatadocMetadata,
+    existing_metadata: model.DatadocMetadata,
+) -> None:
+    """Overrides specific fields in the dataset of `merged_metadata` with values from the dataset of `existing_metadata`.
+
+    This function iterates over a predefined list of fields, `DATASET_FIELDS_FROM_EXISTING_METADATA`,
+    and sets the corresponding fields in the `merged_metadata.dataset` object to the values
+    from the `existing_metadata.dataset` object.
+
+    Args:
+        merged_metadata: An instance of `DatadocMetadata` containing the dataset to be updated.
+        existing_metadata: An instance of `DatadocMetadata` containing the dataset whose values are used to update `merged_metadata.dataset`.
+
+    Returns:
+        `None`.
+    """
+    if merged_metadata.dataset and existing_metadata.dataset:
+        # Override the fields as defined
+        for field in DATASET_FIELDS_FROM_EXISTING_METADATA:
+            setattr(
+                merged_metadata.dataset,
+                field,
+                getattr(existing_metadata.dataset, field),
+            )
+
+
+def merge_variables(
+    existing_metadata: model.DatadocMetadata,
+    extracted_metadata: model.DatadocMetadata,
+    merged_metadata: model.DatadocMetadata,
+) -> model.DatadocMetadata:
+    """Merges variables from the extracted metadata into the existing metadata and updates the merged metadata.
+
+    This function compares the variables from `extracted_metadata` with those in `existing_metadata`.
+    For each variable in `extracted_metadata`, it checks if a variable with the same `short_name` exists
+    in `existing_metadata`. If a match is found, it updates the existing variable with information from
+    `extracted_metadata`. If no match is found, the variable from `extracted_metadata` is directly added to `merged_metadata`.
+
+    Args:
+        existing_metadata: The metadata object containing the current state of variables.
+        extracted_metadata: The metadata object containing new or updated variables to merge.
+        merged_metadata: The metadata object that will contain the result of the merge.
+
+    Returns:
+        model.DatadocMetadata: The `merged_metadata` object containing variables from both `existing_metadata`
+        and `extracted_metadata`.
+    """
+    if (
+        existing_metadata.variables is not None
+        and extracted_metadata is not None
+        and extracted_metadata.variables is not None
+        and merged_metadata.variables is not None
+    ):
+        for extracted in extracted_metadata.variables:
+            existing = next(
+                (
+                    existing
+                    for existing in existing_metadata.variables
+                    if existing.short_name == extracted.short_name
+                ),
+                None,
+            )
+            if existing:
+                existing.id = (
+                    None  # Set to None so that it will be set assigned a fresh ID later
+                )
+                existing.contains_data_from = (
+                    extracted.contains_data_from or existing.contains_data_from
+                )
+                existing.contains_data_until = (
+                    extracted.contains_data_until or existing.contains_data_until
+                )
+                merged_metadata.variables.append(existing)
+            else:
+                # If there is no existing metadata for this variable, we just use what we have extracted
+                merged_metadata.variables.append(extracted)
+    return merged_metadata
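Both helpers extracted from `core.py` are now plain functions, which makes the merge rules easy to exercise in isolation. A sketch (assuming `model.Variable` from `ssb-datadoc-model` accepts `short_name` as an optional field, which the generator expression above relies on):

```python
from datadoc_model import model

from dapla_metadata.datasets.utility.utils import merge_variables

existing = model.DatadocMetadata(
    variables=[model.Variable(short_name="income")],
)
extracted = model.DatadocMetadata(
    variables=[
        model.Variable(short_name="income"),  # matched: existing variable is reused, dates prefer extracted
        model.Variable(short_name="age"),     # unmatched: passed through as extracted
    ],
)
merged = model.DatadocMetadata(variables=[])

result = merge_variables(existing, extracted, merged)
print([v.short_name for v in result.variables])  # ['income', 'age']
```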
{dapla_toolbelt_metadata-0.1.1.dist-info → dapla_toolbelt_metadata-0.2.0.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dapla-toolbelt-metadata
-Version: 0.1.1
+Version: 0.2.0
 Summary: Dapla Toolbelt Metadata
 Home-page: https://github.com/statisticsnorway/dapla-toolbelt-metadata
 License: MIT
@@ -15,30 +15,17 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Requires-Dist: arrow (>=1.3.0)
 Requires-Dist: beautifulsoup4 (>=4.12.3)
-Requires-Dist: black (>=24.8.0,<25.0.0)
 Requires-Dist: bs4 (>=0.0.2,<0.0.3)
-Requires-Dist: click (>=8.0.1)
 Requires-Dist: cloudpathlib[gs] (>=0.17.0)
-Requires-Dist: coverage (>=7.6.1,<8.0.0)
 Requires-Dist: dapla-toolbelt (>=1.3.3)
-Requires-Dist: faker (>=26.1.0,<27.0.0)
-Requires-Dist: furo (>=2024.7.18,<2025.0.0)
-Requires-Dist: gunicorn (>=21.2.0)
 Requires-Dist: pandas (>=1.4.2)
-Requires-Dist: pre-commit (>=3.8.0,<4.0.0)
 Requires-Dist: pyarrow (>=8.0.0)
 Requires-Dist: pydantic (>=2.5.2)
-Requires-Dist: pygments (>=2.18.0,<3.0.0)
 Requires-Dist: pyjwt (>=2.8.0)
-Requires-Dist: pytest (>=8.3.2,<9.0.0)
-Requires-Dist: pytest-mock (>=3.14.0,<4.0.0)
 Requires-Dist: python-dotenv (>=1.0.1)
 Requires-Dist: requests (>=2.31.0)
-Requires-Dist: requests-mock (>=1.12.1,<2.0.0)
-Requires-Dist: ruff (>=0.5.6,<0.6.0)
 Requires-Dist: ssb-datadoc-model (>=6.0.0,<7.0.0)
 Requires-Dist: ssb-klass-python (>=0.0.9)
-Requires-Dist: types-beautifulsoup4 (>=4.12.0.20240511,<5.0.0.0)
 Project-URL: Changelog, https://github.com/statisticsnorway/dapla-toolbelt-metadata/releases
 Project-URL: Documentation, https://statisticsnorway.github.io/dapla-toolbelt-metadata
 Project-URL: Repository, https://github.com/statisticsnorway/dapla-toolbelt-metadata
@@ -71,14 +58,16 @@ Description-Content-Type: text/markdown
 [black]: https://github.com/psf/black
 [poetry]: https://python-poetry.org/
 
+Tools and clients for working with the Dapla Metadata system.
+
 ## Features
 
--
+- Create and update metadata for datasets (Datadoc).
 
-
+### Coming
 
--
--
+- Read, create and update variable definitions.
+- Publish dataset metadata to Statistics Norway's data catalogue.
 
 ## Installation
 
@@ -90,7 +79,7 @@ pip install dapla-toolbelt-metadata
 
 ## Usage
 
-Please see the [Reference Guide] for
+Instructions and examples may be found in the [Dapla Manual](https://manual.dapla.ssb.no/statistikkere/). Please see the [Reference Guide] for API documentation.
 
 ## Contributing
 
dapla_toolbelt_metadata-0.2.0.dist-info/RECORD
ADDED

@@ -0,0 +1,22 @@
+dapla_metadata/__init__.py,sha256=i8bhabWldn-kGfbdCMANY-iC0LbacRIPU1-LEorXqVY,92
+dapla_metadata/datasets/__init__.py,sha256=TvzskpdFC6hGcC9_55URT5jr5wNAPzXuISd2UjJWM_8,280
+dapla_metadata/datasets/code_list.py,sha256=kp1O6sUiUAP9WKlWY8IgHWx_1IOzJA63WveHqolgKmg,9082
+dapla_metadata/datasets/config.py,sha256=lmujwsKKT_X94AL1zHiot24TOyynm92N5dhMti-dhYs,2215
+dapla_metadata/datasets/core.py,sha256=gUt-LDoQnT8-sFvSpodC6qg4ALgITWkSNOn4dVZuuCQ,21461
+dapla_metadata/datasets/dapla_dataset_path_info.py,sha256=7wwVwykJUaRbqCZrAMsZsOd1p_xO8bHe5LhNOLE8j6k,21600
+dapla_metadata/datasets/dataset_parser.py,sha256=PhhGvkUrmQ2nypYgg4JWushb7gFvde65XbRhmXdPZUI,7965
+dapla_metadata/datasets/external_sources/__init__.py,sha256=qvIdXwqyEmXNUCB94ZtZXRzifdW4hiXASFFPtC70f6E,83
+dapla_metadata/datasets/external_sources/external_sources.py,sha256=9eIcOIUbaodNX1w9Tj2wl4U4wUmr5kF1R0i01fKUzGs,2974
+dapla_metadata/datasets/model_backwards_compatibility.py,sha256=69RKZwOrSyaBQvMCjOZiM-S-clVQu8cIKOUGGpI_87Y,19171
+dapla_metadata/datasets/model_validation.py,sha256=j0yHxiV3QusAwzohUG-pSq-6ojhynnto0z_dD5cSnVs,6875
+dapla_metadata/datasets/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dapla_metadata/datasets/statistic_subject_mapping.py,sha256=QdC22DUBOdRgsfmTTEUr9CKCE8fKjMq6-Ezdr6Eof_A,6254
+dapla_metadata/datasets/user_info.py,sha256=6cjIhYHUQmCKnhBjvH6GN3B6H3J-pOjc9NdQcs-NGsE,2589
+dapla_metadata/datasets/utility/__init__.py,sha256=pp6tUcgUbo8iq9OPtFKQrTbLuI3uY7NHptwWSTpasOU,33
+dapla_metadata/datasets/utility/constants.py,sha256=V9ixHTShK1uBcSesaVoKDSHzAh3CX1ATO-Z3wZHxAKs,2417
+dapla_metadata/datasets/utility/enums.py,sha256=C-qlB9ZI4Oy3q1ehbuF0GD7lqJJbuaspY_e8BDFu5DU,727
+dapla_metadata/datasets/utility/utils.py,sha256=wCRZBB3YwXTB_BaqDABaaeBjVmMZP8z3grjfTxX308k,18018
+dapla_toolbelt_metadata-0.2.0.dist-info/LICENSE,sha256=np3IfD5m0ZUofn_kVzDZqliozuiO6wrktw3LRPjyEiI,1073
+dapla_toolbelt_metadata-0.2.0.dist-info/METADATA,sha256=UswEf8UvTCen439oh3m6j-HI8EC8CJpCqThyJis_4no,4927
+dapla_toolbelt_metadata-0.2.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+dapla_toolbelt_metadata-0.2.0.dist-info/RECORD,,
dapla_toolbelt_metadata-0.1.1.dist-info/RECORD
DELETED

@@ -1,21 +0,0 @@
-dataset/__init__.py,sha256=aa6dF2ddfeljw1SElaVqAF6YewuZ0Fxk3q7iebtWR2E,378
-dataset/code_list.py,sha256=SqzassTXL-Gr4TqomXStJFiI5gSN0lXjbmZLDJadMrQ,9050
-dataset/config.py,sha256=EWTmrkLWYKSHVrzEWQO16_CaWrxiUllAnRJFbvEoono,4239
-dataset/core.py,sha256=E7OmFM1iTKlKuvQXuwhucO3z5pKg8fTkE2TmOYFi7_M,22654
-dataset/dapla_dataset_path_info.py,sha256=7wwVwykJUaRbqCZrAMsZsOd1p_xO8bHe5LhNOLE8j6k,21600
-dataset/dataset_parser.py,sha256=AvN4cKaDvP4VwplNR5uvXJdiZh4ippNcFTBll-HhH-4,7949
-dataset/external_sources/__init__.py,sha256=qvIdXwqyEmXNUCB94ZtZXRzifdW4hiXASFFPtC70f6E,83
-dataset/external_sources/external_sources.py,sha256=9eIcOIUbaodNX1w9Tj2wl4U4wUmr5kF1R0i01fKUzGs,2974
-dataset/model_backwards_compatibility.py,sha256=69RKZwOrSyaBQvMCjOZiM-S-clVQu8cIKOUGGpI_87Y,19171
-dataset/model_validation.py,sha256=uj98wiz9SWbJc_He3kGGejy4JIIXM6RKaSccJfmo6wc,6672
-dataset/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dataset/statistic_subject_mapping.py,sha256=aOKQLvVNF4XWqVrDXVFUz-Hj_me6JeWW_uEPAKJvVJk,6145
-dataset/user_info.py,sha256=42PikdAQzC1FLOISC49yZO0IgVMWIq_QgxVD0xixaes,2541
-dataset/utility/__init__.py,sha256=pp6tUcgUbo8iq9OPtFKQrTbLuI3uY7NHptwWSTpasOU,33
-dataset/utility/constants.py,sha256=Wv1LIqq2P7ow6sToNdrTOAIMqvyPxNS2j6ArIB-GMds,2301
-dataset/utility/enums.py,sha256=C-qlB9ZI4Oy3q1ehbuF0GD7lqJJbuaspY_e8BDFu5DU,727
-dataset/utility/utils.py,sha256=j2A6DOgb4MmKaEGd5qW8DHxUsTZrZFLLAsvPW1BQIc0,14269
-dapla_toolbelt_metadata-0.1.1.dist-info/LICENSE,sha256=np3IfD5m0ZUofn_kVzDZqliozuiO6wrktw3LRPjyEiI,1073
-dapla_toolbelt_metadata-0.1.1.dist-info/METADATA,sha256=nmA0eQkasfLsUfoRtwKJBd9vD9m8mR9fC9z-AjZj-lM,5158
-dapla_toolbelt_metadata-0.1.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-dapla_toolbelt_metadata-0.1.1.dist-info/RECORD,,
dataset/config.py
DELETED
@@ -1,151 +0,0 @@
-"""Configuration management for dataset package."""
-
-from __future__ import annotations
-
-import logging
-import os
-from pathlib import Path
-from pprint import pformat
-from typing import Literal
-
-from dotenv import dotenv_values
-from dotenv import load_dotenv
-
-from dataset.utility.enums import DaplaRegion
-from dataset.utility.enums import DaplaService
-
-logging.basicConfig(level=logging.DEBUG, force=True)
-
-logger = logging.getLogger(__name__)
-
-DOT_ENV_FILE_PATH = Path(__file__).parent.joinpath(".env")
-
-JUPYTERHUB_USER = "JUPYTERHUB_USER"
-DAPLA_REGION = "DAPLA_REGION"
-DAPLA_SERVICE = "DAPLA_SERVICE"
-
-env_loaded = False
-
-
-def _load_dotenv_file() -> None:
-    global env_loaded  # noqa: PLW0603
-    if not env_loaded and DOT_ENV_FILE_PATH.exists():
-        load_dotenv(DOT_ENV_FILE_PATH)
-        env_loaded = True
-        logger.info(
-            "Loaded .env file with config keys: \n%s",
-            pformat(list(dotenv_values(DOT_ENV_FILE_PATH).keys())),
-        )
-
-
-def _get_config_item(item: str) -> str | None:
-    """Get a config item. Makes sure all access is logged."""
-    _load_dotenv_file()
-    value = os.getenv(item)
-    logger.debug("Config accessed. %s", item)
-    return value
-
-
-def get_jupyterhub_user() -> str | None:
-    """Get the JupyterHub user name."""
-    return _get_config_item(JUPYTERHUB_USER)
-
-
-def get_datadoc_dataset_path() -> str | None:
-    """Get the path to the dataset."""
-    return _get_config_item("DATADOC_DATASET_PATH")
-
-
-def get_log_level() -> int:
-    """Get the log level."""
-    # Magic numbers as defined in Python's stdlib logging
-    log_levels: dict[str, int] = {
-        "CRITICAL": 50,
-        "ERROR": 40,
-        "WARNING": 30,
-        "INFO": 20,
-        "DEBUG": 10,
-    }
-    if level_string := _get_config_item("DATADOC_LOG_LEVEL"):
-        try:
-            return log_levels[level_string.upper()]
-        except KeyError:
-            return log_levels["INFO"]
-    else:
-        return log_levels["INFO"]
-
-
-def get_log_formatter() -> Literal["simple", "json"]:
-    """Get log formatter configuration."""
-    if (
-        _get_config_item("DATADOC_ENABLE_JSON_FORMATTING") == "True"
-        or get_dapla_region() is not None
-    ):
-        return "json"
-    return "simple"
-
-
-def get_jupyterhub_service_prefix() -> str | None:
-    """Get the JupyterHub service prefix."""
-    return _get_config_item("JUPYTERHUB_SERVICE_PREFIX")
-
-
-def get_app_name() -> str:
-    """Get the name of the app. Defaults to 'Datadoc'."""
-    return _get_config_item("DATADOC_APP_NAME") or "Datadoc"
-
-
-def get_jupyterhub_http_referrer() -> str | None:
-    """Get the JupyterHub http referrer."""
-    return _get_config_item("JUPYTERHUB_HTTP_REFERER")
-
-
-def get_port() -> int:
-    """Get the port to run the app on."""
-    return int(_get_config_item("DATADOC_PORT") or 7002)
-
-
-def get_statistical_subject_source_url() -> str | None:
-    """Get the URL to the statistical subject source."""
-    return _get_config_item("DATADOC_STATISTICAL_SUBJECT_SOURCE_URL")
-
-
-def get_dapla_region() -> DaplaRegion | None:
-    """Get the Dapla region we're running on."""
-    if region := _get_config_item(DAPLA_REGION):
-        return DaplaRegion(region)
-
-    return None
-
-
-def get_dapla_service() -> DaplaService | None:
-    """Get the Dapla service we're running on."""
-    if service := _get_config_item(DAPLA_SERVICE):
-        return DaplaService(service)
-
-    return None
-
-
-def get_oidc_token() -> str | None:
-    """Get the JWT token from the environment."""
-    return _get_config_item("OIDC_TOKEN")
-
-
-def get_unit_code() -> int | None:
-    """The code for the Unit Type code list in Klass."""
-    return int(_get_config_item("DATADOC_UNIT_CODE") or 702)
-
-
-def get_measurement_unit_code() -> int | None:
-    """The code for the Measurement Unit code list in Klass."""
-    return int(_get_config_item("DATADOC_MEASUREMENT_UNIT") or 303)
-
-
-def get_organisational_unit_code() -> int | None:
-    """The code for the organisational units code list in Klass."""
-    return int(_get_config_item("DATADOC_ORGANISATIONAL_UNIT_CODE") or 83)
-
-
-def get_data_source_code() -> int | None:
-    """The code for the organisational units code list in Klass."""
-    return int(_get_config_item("DATADOC_DATA_SOURCE_CODE") or 712)
The remaining renamed files (listed above with +0 -0) have no content changes.