cognite-extractor-utils 7.5.4__py3-none-any.whl → 7.5.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cognite/extractorutils/__init__.py +3 -1
- cognite/extractorutils/_inner_util.py +14 -3
- cognite/extractorutils/base.py +14 -15
- cognite/extractorutils/configtools/__init__.py +25 -0
- cognite/extractorutils/configtools/_util.py +7 -9
- cognite/extractorutils/configtools/elements.py +58 -49
- cognite/extractorutils/configtools/loaders.py +29 -26
- cognite/extractorutils/configtools/validators.py +2 -3
- cognite/extractorutils/exceptions.py +1 -4
- cognite/extractorutils/metrics.py +18 -18
- cognite/extractorutils/statestore/_base.py +3 -4
- cognite/extractorutils/statestore/hashing.py +24 -24
- cognite/extractorutils/statestore/watermark.py +17 -14
- cognite/extractorutils/threading.py +4 -4
- cognite/extractorutils/unstable/configuration/exceptions.py +24 -0
- cognite/extractorutils/unstable/configuration/loaders.py +18 -7
- cognite/extractorutils/unstable/configuration/models.py +25 -3
- cognite/extractorutils/unstable/core/_dto.py +10 -0
- cognite/extractorutils/unstable/core/base.py +179 -29
- cognite/extractorutils/unstable/core/errors.py +72 -0
- cognite/extractorutils/unstable/core/restart_policy.py +29 -0
- cognite/extractorutils/unstable/core/runtime.py +170 -26
- cognite/extractorutils/unstable/core/tasks.py +2 -0
- cognite/extractorutils/unstable/scheduling/_scheduler.py +4 -4
- cognite/extractorutils/uploader/__init__.py +14 -0
- cognite/extractorutils/uploader/_base.py +8 -8
- cognite/extractorutils/uploader/assets.py +15 -9
- cognite/extractorutils/uploader/data_modeling.py +13 -13
- cognite/extractorutils/uploader/events.py +9 -9
- cognite/extractorutils/uploader/files.py +127 -31
- cognite/extractorutils/uploader/raw.py +10 -10
- cognite/extractorutils/uploader/time_series.py +56 -58
- cognite/extractorutils/uploader/upload_failure_handler.py +64 -0
- cognite/extractorutils/uploader_extractor.py +11 -11
- cognite/extractorutils/uploader_types.py +4 -12
- cognite/extractorutils/util.py +21 -23
- {cognite_extractor_utils-7.5.4.dist-info → cognite_extractor_utils-7.5.5.dist-info}/METADATA +3 -2
- cognite_extractor_utils-7.5.5.dist-info/RECORD +49 -0
- {cognite_extractor_utils-7.5.4.dist-info → cognite_extractor_utils-7.5.5.dist-info}/WHEEL +1 -1
- cognite/extractorutils/unstable/core/__main__.py +0 -31
- cognite_extractor_utils-7.5.4.dist-info/RECORD +0 -46
- {cognite_extractor_utils-7.5.4.dist-info → cognite_extractor_utils-7.5.5.dist-info}/LICENSE +0 -0
cognite/extractorutils/exceptions.py:

@@ -13,9 +13,6 @@
 # limitations under the License.


-from typing import List, Optional
-
-
 class InvalidConfigError(Exception):
     """
     Exception thrown from ``load_yaml`` and ``load_yaml_dict`` if config file is invalid. This can be due to
@@ -25,7 +22,7 @@ class InvalidConfigError(Exception):
     * Unkown fields
     """

-    def __init__(self, message: str, details: Optional[List[str]] = None):
+    def __init__(self, message: str, details: list[str] | None = None):
         super(InvalidConfigError, self).__init__()
         self.message = message
         self.details = details
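The only change here is the annotation style (PEP 604 unions instead of `Optional`/`List`); the exception's behavior is unchanged. A minimal sketch of how it is consumed, with illustrative message and details:

```python
from cognite.extractorutils.exceptions import InvalidConfigError

try:
    # Stand-in for a load_yaml/load_yaml_dict call rejecting a config file.
    raise InvalidConfigError("missing required fields", details=["project", "base-url"])
except InvalidConfigError as e:
    print(e.message)        # "missing required fields"
    print(e.details or [])  # ["project", "base-url"]
```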
cognite/extractorutils/metrics.py:

@@ -43,7 +43,7 @@ import threading
 from abc import ABC, abstractmethod
 from time import sleep
 from types import TracebackType
-from typing import Any, Callable,
+from typing import Any, Callable, Type, TypeVar

 import arrow
 import psutil
@@ -177,14 +177,14 @@ class AbstractMetricsPusher(ABC):

     def __init__(
         self,
-        push_interval: Optional[int] = None,
-        thread_name: Optional[str] = None,
-        cancellation_token: Optional[CancellationToken] = None,
+        push_interval: int | None = None,
+        thread_name: str | None = None,
+        cancellation_token: CancellationToken | None = None,
     ):
         self.push_interval = push_interval
         self.thread_name = thread_name

-        self.thread: Optional[threading.Thread] = None
+        self.thread: threading.Thread | None = None
         self.thread_name = thread_name
         self.cancellation_token = cancellation_token.create_child_token() if cancellation_token else CancellationToken()

@@ -232,7 +232,7 @@ class AbstractMetricsPusher(ABC):
         return self

     def __exit__(
-        self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]
+        self, exc_type: Type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
     ) -> None:
         """
         Wraps around stop method, for use as context manager
@@ -264,10 +264,10 @@ class PrometheusPusher(AbstractMetricsPusher):
         job_name: str,
         url: str,
         push_interval: int,
-        username: Optional[str] = None,
-        password: Optional[str] = None,
-        thread_name: Optional[str] = None,
-        cancellation_token: Optional[CancellationToken] = None,
+        username: str | None = None,
+        password: str | None = None,
+        thread_name: str | None = None,
+        cancellation_token: CancellationToken | None = None,
     ):
         super(PrometheusPusher, self).__init__(push_interval, thread_name, cancellation_token)

@@ -277,7 +277,7 @@ class PrometheusPusher(AbstractMetricsPusher):

         self.url = url

-    def _auth_handler(self, url: str, method: str, timeout: int, headers: List[Tuple[str, str]], data: Any) -> Callable:
+    def _auth_handler(self, url: str, method: str, timeout: int, headers: list[tuple[str, str]], data: Any) -> Callable:
         """
         Returns a authentication handler against the Prometheus Pushgateway to use in the pushadd_to_gateway method.

@@ -340,10 +340,10 @@ class CognitePusher(AbstractMetricsPusher):
         cdf_client: CogniteClient,
         external_id_prefix: str,
         push_interval: int,
-        asset: Optional[Asset] = None,
-        data_set: Optional[EitherId] = None,
-        thread_name: Optional[str] = None,
-        cancellation_token: Optional[CancellationToken] = None,
+        asset: Asset | None = None,
+        data_set: EitherId | None = None,
+        thread_name: str | None = None,
+        cancellation_token: CancellationToken | None = None,
     ):
         super(CognitePusher, self).__init__(push_interval, thread_name, cancellation_token)

@@ -360,11 +360,11 @@ class CognitePusher(AbstractMetricsPusher):
         """
         Initialize the CDF tenant with the necessary time series and asset.
         """
-        time_series: List[TimeSeries] = []
+        time_series: list[TimeSeries] = []

         if self.asset is not None:
             # Ensure that asset exist, and retrieve internal ID
-            asset: Optional[Asset]
+            asset: Asset | None
             try:
                 asset = self.cdf_client.assets.create(self.asset)
             except CogniteDuplicatedError:
@@ -406,7 +406,7 @@ class CognitePusher(AbstractMetricsPusher):
         """
         timestamp = int(arrow.get().float_timestamp * 1000)

-        datapoints: List[Dict[str, Union[str, int, List[Any], Datapoints, DatapointsArray]]] = []
+        datapoints: list[dict[str, str | int | list[Any] | Datapoints | DatapointsArray]] = []

         for metric in REGISTRY.collect():
             if type(metric) == Metric and metric.type in ["gauge", "counter"]:
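Beyond the type-annotation migration, the hunks above show that the pushers are context managers (`__exit__` wraps the stop method) and that a passed-in `CancellationToken` is wrapped via `create_child_token()`. A hedged usage sketch, with a placeholder Pushgateway URL and job name:

```python
from cognite.extractorutils.metrics import PrometheusPusher

# username/password are optional and default to None after this change.
pusher = PrometheusPusher(
    job_name="my-extractor",      # placeholder
    url="http://localhost:9091",  # placeholder Pushgateway address
    push_interval=10,
)

with pusher:  # starts the background push thread; stopped on exit
    ...  # run extraction; metrics are pushed every 10 seconds
```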
cognite/extractorutils/statestore/_base.py:

@@ -1,7 +1,6 @@
 import logging
 import threading
 from abc import ABC, abstractmethod
-from typing import Optional

 from cognite.extractorutils._inner_util import _resolve_log_level
 from cognite.extractorutils.threading import CancellationToken
@@ -15,10 +14,10 @@ RETRIES = 10
 class _BaseStateStore(ABC):
     def __init__(
         self,
-        save_interval: Optional[int] = None,
+        save_interval: int | None = None,
         trigger_log_level: str = "DEBUG",
-        thread_name: Optional[str] = None,
-        cancellation_token: Optional[CancellationToken] = None,
+        thread_name: str | None = None,
+        cancellation_token: CancellationToken | None = None,
     ) -> None:
         self._initialized = False

cognite/extractorutils/statestore/hashing.py:

@@ -2,7 +2,7 @@ import hashlib
 import json
 from abc import ABC
 from types import TracebackType
-from typing import Any,
+from typing import Any, Iterable, Iterator, Type

 import orjson

@@ -19,10 +19,10 @@ from ._base import RETRIES, RETRY_BACKOFF_FACTOR, RETRY_DELAY, RETRY_MAX_DELAY,
 class AbstractHashStateStore(_BaseStateStore, ABC):
     def __init__(
         self,
-        save_interval: Optional[int] = None,
+        save_interval: int | None = None,
         trigger_log_level: str = "DEBUG",
-        thread_name: Optional[str] = None,
-        cancellation_token: Optional[CancellationToken] = None,
+        thread_name: str | None = None,
+        cancellation_token: CancellationToken | None = None,
     ) -> None:
         super().__init__(
             save_interval=save_interval,
@@ -31,31 +31,31 @@ class AbstractHashStateStore(_BaseStateStore, ABC):
             cancellation_token=cancellation_token,
         )

-        self._local_state: Dict[str, Dict[str, str]] = {}
-        self._seen: Set[str] = set()
+        self._local_state: dict[str, dict[str, str]] = {}
+        self._seen: set[str] = set()

-    def get_state(self, external_id: str) -> Optional[str]:
+    def get_state(self, external_id: str) -> str | None:
         with self.lock:
             return self._local_state.get(external_id, {}).get("digest")

-    def _hash_row(self, data: Dict[str, Any]) -> str:
+    def _hash_row(self, data: dict[str, Any]) -> str:
         return hashlib.sha256(orjson.dumps(data, option=orjson.OPT_SORT_KEYS)).hexdigest()

-    def set_state(self, external_id: str, data: Dict[str, Any]) -> None:
+    def set_state(self, external_id: str, data: dict[str, Any]) -> None:
         with self.lock:
             self._local_state[external_id] = {"digest": self._hash_row(data)}

-    def has_changed(self, external_id: str, data: Dict[str, Any]) -> bool:
+    def has_changed(self, external_id: str, data: dict[str, Any]) -> bool:
         with self.lock:
             if external_id not in self._local_state:
                 return True

             return self._hash_row(data) != self._local_state[external_id]["digest"]

-    def __getitem__(self, external_id: str) -> Optional[str]:
+    def __getitem__(self, external_id: str) -> str | None:
         return self.get_state(external_id)

-    def __setitem__(self, key: str, value: Dict[str, Any]) -> None:
+    def __setitem__(self, key: str, value: dict[str, Any]) -> None:
         self.set_state(external_id=key, data=value)

     def __contains__(self, external_id: str) -> bool:
@@ -76,10 +76,10 @@ class RawHashStateStore(AbstractHashStateStore):
         cdf_client: CogniteClient,
         database: str,
         table: str,
-        save_interval: Optional[int] = None,
+        save_interval: int | None = None,
         trigger_log_level: str = "DEBUG",
-        thread_name: Optional[str] = None,
-        cancellation_token: Optional[CancellationToken] = None,
+        thread_name: str | None = None,
+        cancellation_token: CancellationToken | None = None,
     ) -> None:
         super().__init__(
             save_interval=save_interval,
@@ -169,9 +169,9 @@ class RawHashStateStore(AbstractHashStateStore):

     def __exit__(
         self,
-        exc_type: Optional[Type[BaseException]],
-        exc_val: Optional[BaseException],
-        exc_tb: Optional[TracebackType],
+        exc_type: Type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
     ) -> None:
         """
         Wraps around stop method, for use as context manager
@@ -188,10 +188,10 @@ class LocalHashStateStore(AbstractHashStateStore):
     def __init__(
         self,
         file_path: str,
-        save_interval: Optional[int] = None,
+        save_interval: int | None = None,
         trigger_log_level: str = "DEBUG",
-        thread_name: Optional[str] = None,
-        cancellation_token: Optional[CancellationToken] = None,
+        thread_name: str | None = None,
+        cancellation_token: CancellationToken | None = None,
     ) -> None:
         super().__init__(
             save_interval=save_interval,
@@ -243,9 +243,9 @@ class LocalHashStateStore(AbstractHashStateStore):

     def __exit__(
         self,
-        exc_type: Optional[Type[BaseException]],
-        exc_val: Optional[BaseException],
-        exc_tb: Optional[TracebackType],
+        exc_type: Type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
     ) -> None:
         """
         Wraps around stop method, for use as context manager
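The hash stores above keep one SHA-256 digest per external ID (`{"digest": ...}`), so `has_changed` is a cheap idempotency check before an upload. A sketch of the intended flow, assuming a `LocalHashStateStore` and omitting any start/initialize calls the store may need:

```python
from cognite.extractorutils.statestore.hashing import LocalHashStateStore

states = LocalHashStateStore("hashes.json")  # illustrative file path

row = {"externalId": "pump-42", "pressure": 3.1}
if states.has_changed("pump-42", row):
    ...  # upload the row
    states.set_state("pump-42", row)  # stores sha256 of the sorted-key JSON dump
```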
cognite/extractorutils/statestore/watermark.py:

@@ -88,7 +88,7 @@ You can set a state store to automatically update on upload triggers from an upl
 import json
 from abc import ABC
 from types import TracebackType
-from typing import Any, Callable, Dict, Iterator, List,
+from typing import Any, Callable, Dict, Iterator, List, Tuple, Type, Union

 from cognite.client import CogniteClient
 from cognite.client.exceptions import CogniteAPIError
@@ -114,10 +114,10 @@ class AbstractStateStore(_BaseStateStore, ABC):

     def __init__(
         self,
-        save_interval: Optional[int] = None,
+        save_interval: int | None = None,
         trigger_log_level: str = "DEBUG",
-        thread_name: Optional[str] = None,
-        cancellation_token: Optional[CancellationToken] = None,
+        thread_name: str | None = None,
+        cancellation_token: CancellationToken | None = None,
     ):
         super().__init__(
             save_interval=save_interval,
@@ -152,7 +152,7 @@ class AbstractStateStore(_BaseStateStore, ABC):
         state = self._local_state.get(external_id, {})
         return state.get("low"), state.get("high")

-    def set_state(self, external_id: str, low: Optional[Any] = None, high: Optional[Any] = None) -> None:
+    def set_state(self, external_id: str, low: Any | None = None, high: Any | None = None) -> None:
         """
         Set/update state of a singe external ID.

@@ -166,7 +166,7 @@ class AbstractStateStore(_BaseStateStore, ABC):
         state["low"] = low if low is not None else state.get("low")
         state["high"] = high if high is not None else state.get("high")

-    def expand_state(self, external_id: str, low: Optional[Any] = None, high: Optional[Any] = None) -> None:
+    def expand_state(self, external_id: str, low: Any | None = None, high: Any | None = None) -> None:
         """
         Like set_state, but only sets state if the proposed state is outside the stored state. That is if e.g. low is
         lower than the stored low.
@@ -275,10 +275,10 @@ class RawStateStore(AbstractStateStore):
         cdf_client: CogniteClient,
         database: str,
         table: str,
-        save_interval: Optional[int] = None,
+        save_interval: int | None = None,
         trigger_log_level: str = "DEBUG",
-        thread_name: Optional[str] = None,
-        cancellation_token: Optional[CancellationToken] = None,
+        thread_name: str | None = None,
+        cancellation_token: CancellationToken | None = None,
     ):
         super().__init__(save_interval, trigger_log_level, thread_name, cancellation_token)

@@ -380,7 +380,7 @@ class RawStateStore(AbstractStateStore):
         return self

     def __exit__(
-        self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]
+        self, exc_type: Type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
     ) -> None:
         """
         Wraps around stop method, for use as context manager
@@ -409,10 +409,10 @@ class LocalStateStore(AbstractStateStore):
     def __init__(
         self,
         file_path: str,
-        save_interval: Optional[int] = None,
+        save_interval: int | None = None,
         trigger_log_level: str = "DEBUG",
-        thread_name: Optional[str] = None,
-        cancellation_token: Optional[CancellationToken] = None,
+        thread_name: str | None = None,
+        cancellation_token: CancellationToken | None = None,
     ):
         super().__init__(save_interval, trigger_log_level, thread_name, cancellation_token)

@@ -459,7 +459,10 @@ class LocalStateStore(AbstractStateStore):
         return self

     def __exit__(
-        self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]
+        self,
+        exc_type: Type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
     ) -> None:
         """
         Wraps around stop method, for use as context manager
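As the docstring above says, `expand_state` only moves the stored low/high watermarks outward, which is the safe choice when data arrives out of order. A small sketch (file path illustrative, start/initialize calls omitted):

```python
from cognite.extractorutils.statestore.watermark import LocalStateStore

store = LocalStateStore("states.json")

store.set_state("ts-1", low=100, high=200)
store.expand_state("ts-1", low=150, high=250)  # low stays 100, high widens to 250
low, high = store.get_state("ts-1")            # (100, 250)
```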
cognite/extractorutils/threading.py:

@@ -2,7 +2,7 @@ import logging
 import signal
 from threading import Condition
 from time import time
-from typing import Any, Optional
+from typing import Any


 class CancellationToken:
@@ -14,10 +14,10 @@ class CancellationToken:
     cancelled if the parent is cancelled, but can be canceled alone without affecting the parent token.
     """

-    def __init__(self, condition: Optional[Condition] = None) -> None:
+    def __init__(self, condition: Condition | None = None) -> None:
         self._cv: Condition = condition or Condition()
         self._is_cancelled_int: bool = False
-        self._parent: Optional["CancellationToken"] = None
+        self._parent: "CancellationToken" | None = None

     def __repr__(self) -> str:
         cls = self.__class__
@@ -59,7 +59,7 @@ class CancellationToken:
         """
         self.cancel()

-    def wait(self, timeout: Optional[float] = None) -> bool:
+    def wait(self, timeout: float | None = None) -> bool:
         endtime = None
         if timeout is not None:
             endtime = time() + timeout
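`wait(timeout)` makes the token double as an interruptible sleep, and `create_child_token()` (used by the metrics pushers above) yields a token that is cancelled with its parent but can also be cancelled alone. A sketch; the `is_cancelled` property is an assumption inferred from the private `_is_cancelled_int` flag:

```python
from cognite.extractorutils.threading import CancellationToken

token = CancellationToken()
child = token.create_child_token()

def worker() -> None:
    while not child.is_cancelled:  # assumed property; not shown in this hunk
        ...                        # one unit of work
        child.wait(timeout=5.0)    # sleeps up to 5 s, returns early on cancellation

token.cancel()  # also cancels `child`, stopping the worker
```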
cognite/extractorutils/unstable/configuration/exceptions.py (new file):

@@ -0,0 +1,24 @@
+from typing import List, Optional
+
+
+class InvalidConfigError(Exception):
+    """
+    Exception thrown from ``load_yaml`` and ``load_yaml_dict`` if config file is invalid. This can be due to
+
+    * Missing fields
+    * Incompatible types
+    * Unkown fields
+    """
+
+    def __init__(self, message: str, details: Optional[List[str]] = None):
+        super(InvalidConfigError, self).__init__()
+        self.message = message
+        self.details = details
+
+        self.attempted_revision: int | None = None
+
+    def __str__(self) -> str:
+        return f"Invalid config: {self.message}"
+
+    def __repr__(self) -> str:
+        return self.__str__()
cognite/extractorutils/unstable/configuration/loaders.py:

@@ -8,9 +8,13 @@ from pydantic import ValidationError

 from cognite.client import CogniteClient
 from cognite.extractorutils.configtools.loaders import _load_yaml_dict_raw
-from cognite.extractorutils.exceptions import InvalidConfigError
+from cognite.extractorutils.exceptions import InvalidConfigError as OldInvalidConfigError
+from cognite.extractorutils.unstable.configuration.exceptions import InvalidConfigError
 from cognite.extractorutils.unstable.configuration.models import ConfigModel

+__all__ = ["ConfigFormat", "load_file", "load_from_cdf", "load_io", "load_dict"]
+
+
 _T = TypeVar("_T", bound=ConfigModel)


@@ -44,7 +48,17 @@ def load_from_cdf(
     )
     response.raise_for_status()
     data = response.json()
-
+
+    try:
+        return load_io(StringIO(data["config"]), ConfigFormat.YAML, schema), data["revision"]
+
+    except InvalidConfigError as e:
+        e.attempted_revision = data["revision"]
+        raise e
+    except OldInvalidConfigError as e:
+        new_e = InvalidConfigError(e.message)
+        new_e.attempted_revision = data["revision"]
+        raise new_e from e


 def load_io(stream: TextIO, format: ConfigFormat, schema: Type[_T]) -> _T:
@@ -100,12 +114,9 @@ def load_dict(data: dict, schema: Type[_T]) -> _T:
         if "ctx" in err and "error" in err["ctx"]:
             exc = err["ctx"]["error"]
             if isinstance(exc, ValueError) or isinstance(exc, AssertionError):
-                messages.append(f"{
+                messages.append(f"{str(exc)}: {loc_str}")
                 continue

-
-            messages.append(f"{err.get('msg')}: {loc_str}")
-        else:
-            messages.append(f"{loc_str}: {err.get('msg')}")
+        messages.append(f"{err.get('msg')}: {loc_str}")

     raise InvalidConfigError(", ".join(messages), details=messages) from e
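The net effect of these hunks: `load_from_cdf` now returns a `(config, revision)` pair, and any config error — including the legacy `InvalidConfigError` from `cognite.extractorutils.exceptions` — is surfaced as the new exception type with `attempted_revision` set. A hedged sketch; `cognite_client` and `MyConfig` are placeholders and the full `load_from_cdf` signature is not shown in this diff:

```python
from cognite.extractorutils.unstable.configuration.exceptions import InvalidConfigError
from cognite.extractorutils.unstable.configuration.loaders import load_from_cdf

try:
    config, revision = load_from_cdf(cognite_client, schema=MyConfig)  # illustrative call
except InvalidConfigError as e:
    print(f"{e} (revision {e.attempted_revision})")  # which revision failed to parse
```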
cognite/extractorutils/unstable/configuration/models.py:

@@ -19,6 +19,21 @@ from cognite.client.credentials import (
 from cognite.extractorutils.configtools._util import _load_certificate_data
 from cognite.extractorutils.exceptions import InvalidConfigError

+__all__ = [
+    "ConfigModel",
+    "AuthenticationConfig",
+    "TimeIntervalConfig",
+    "ConnectionConfig",
+    "CronConfig",
+    "IntervalConfig",
+    "ScheduleConfig",
+    "LogLevel",
+    "LogFileHandlerConfig",
+    "LogConsoleHandlerConfig",
+    "LogHandlerConfig",
+    "ExtractorConfig",
+]
+

 class ConfigModel(BaseModel):
     model_config = ConfigDict(
@@ -139,7 +154,7 @@ class ConnectionConfig(ConfigModel):
     project: str
     base_url: str

-
+    integration: str

     authentication: AuthenticationConfig

@@ -223,17 +238,24 @@ class LogLevel(Enum):


 class LogFileHandlerConfig(ConfigModel):
+    type: Literal["file"]
     path: Path
     level: LogLevel
     retention: int = 7


 class LogConsoleHandlerConfig(ConfigModel):
+    type: Literal["console"]
     level: LogLevel


-LogHandlerConfig =
+LogHandlerConfig = Annotated[LogFileHandlerConfig | LogConsoleHandlerConfig, Field(discriminator="type")]
+
+
+# Mypy BS
+def _log_handler_default() -> List[LogHandlerConfig]:
+    return [LogConsoleHandlerConfig(type="console", level=LogLevel.INFO)]


 class ExtractorConfig(ConfigModel):
-    log_handlers: List[LogHandlerConfig] = Field(default_factory=
+    log_handlers: List[LogHandlerConfig] = Field(default_factory=_log_handler_default)
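With the new `type` discriminator, pydantic can resolve a `LogHandlerConfig` union member directly from the `type` key. A sketch of what validation looks like under pydantic v2 (the `ConfigDict` usage above implies v2); the string `"INFO"` assumes `LogLevel` accepts it as a value:

```python
from pydantic import TypeAdapter

from cognite.extractorutils.unstable.configuration.models import (
    LogConsoleHandlerConfig,
    LogHandlerConfig,
)

handler = TypeAdapter(LogHandlerConfig).validate_python({"type": "console", "level": "INFO"})
assert isinstance(handler, LogConsoleHandlerConfig)
```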
cognite/extractorutils/unstable/core/_dto.py:

@@ -32,3 +32,13 @@ class TaskUpdate(CogniteModel):
     type: Literal["started"] | Literal["ended"]
     name: str
     timestamp: int
+
+
+class Error(CogniteModel):
+    external_id: str
+    level: str
+    description: str
+    details: str | None
+    start_time: int
+    end_time: int | None
+    task: str | None