ipulse-shared-core-ftredge: 2.7.1-py3-none-any.whl → 2.8.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ipulse-shared-core-ftredge might be problematic.
- ipulse_shared_core_ftredge/__init__.py +7 -12
- ipulse_shared_core_ftredge/logging/__init__.py +1 -0
- ipulse_shared_core_ftredge/logging/logging_handlers_and_formatters.py +144 -0
- ipulse_shared_core_ftredge/logging/utils_logging.py +72 -0
- ipulse_shared_core_ftredge/utils/__init__.py +1 -21
- ipulse_shared_core_ftredge/utils/utils_common.py +3 -173
- {ipulse_shared_core_ftredge-2.7.1.dist-info → ipulse_shared_core_ftredge-2.8.1.dist-info}/METADATA +1 -2
- ipulse_shared_core_ftredge-2.8.1.dist-info/RECORD +19 -0
- ipulse_shared_core_ftredge/enums/__init__.py +0 -37
- ipulse_shared_core_ftredge/enums/enums_common_utils.py +0 -107
- ipulse_shared_core_ftredge/enums/enums_data_eng.py +0 -313
- ipulse_shared_core_ftredge/enums/enums_logging.py +0 -108
- ipulse_shared_core_ftredge/enums/enums_module_fincore.py +0 -72
- ipulse_shared_core_ftredge/enums/enums_modules.py +0 -31
- ipulse_shared_core_ftredge/enums/enums_solution_providers.py +0 -24
- ipulse_shared_core_ftredge/enums/pulse_enums.py +0 -182
- ipulse_shared_core_ftredge/utils/logs/__init__.py +0 -2
- ipulse_shared_core_ftredge/utils/logs/context_log.py +0 -210
- ipulse_shared_core_ftredge/utils/logs/get_logger.py +0 -103
- ipulse_shared_core_ftredge/utils/utils_cloud.py +0 -53
- ipulse_shared_core_ftredge/utils/utils_cloud_gcp.py +0 -442
- ipulse_shared_core_ftredge/utils/utils_cloud_gcp_with_collectors.py +0 -166
- ipulse_shared_core_ftredge/utils/utils_cloud_with_collectors.py +0 -27
- ipulse_shared_core_ftredge/utils/utils_collector_pipelinemon.py +0 -356
- ipulse_shared_core_ftredge/utils/utils_templates_and_schemas.py +0 -151
- ipulse_shared_core_ftredge-2.7.1.dist-info/RECORD +0 -33
- /ipulse_shared_core_ftredge/{utils/logs → logging}/audit_log_firestore.py +0 -0
- {ipulse_shared_core_ftredge-2.7.1.dist-info → ipulse_shared_core_ftredge-2.8.1.dist-info}/LICENCE +0 -0
- {ipulse_shared_core_ftredge-2.7.1.dist-info → ipulse_shared_core_ftredge-2.8.1.dist-info}/WHEEL +0 -0
- {ipulse_shared_core_ftredge-2.7.1.dist-info → ipulse_shared_core_ftredge-2.8.1.dist-info}/top_level.txt +0 -0
ipulse_shared_core_ftredge/utils/utils_collector_pipelinemon.py
DELETED
@@ -1,356 +0,0 @@
-# pylint: disable=missing-module-docstring
-# pylint: disable=missing-function-docstring
-# pylint: disable=logging-fstring-interpolation
-# pylint: disable=line-too-long
-# pylint: disable=missing-class-docstring
-# pylint: disable=broad-exception-caught
-import json
-import uuid
-from datetime import datetime, timezone
-from contextlib import contextmanager
-from typing import List
-from ipulse_shared_core_ftredge.enums import LogLevel, TargetLogs
-from ipulse_shared_core_ftredge.utils.logs import ContextLog
-############################################################################
-##### PIPINEMON Collector for Logs and Statuses of running pipelines #######
-class Pipelinemon:
-    ERROR_START_CODE = LogLevel.ERROR.value
-    WARNING_START_CODE = LogLevel.WARNING.value
-    NOTICE_START_CODE = LogLevel.NOTICE.value
-    SUCCESS_START_CODE = LogLevel.SUCCESS.value
-    INFO_START_CODE = LogLevel.INFO.value
-
-    def __init__(self, base_context: str, logger, target_logs: TargetLogs = TargetLogs.MIXED,
-                 max_log_field_size:int =10000, max_log_dict_size:float=256 * 1024 * 0.80, max_log_traceback_lines:int = 30):
-
-        self._id = str(uuid.uuid4())
-        self._logs = []
-        self._early_stop = False
-        self._errors_count = 0
-        self._warnings_count = 0
-        self._notices_count = 0
-        self._successes_count = 0
-        self._infos_count = 0
-        self._systems_impacted = []
-        self._by_level_counts = {level.name: 0 for level in LogLevel}
-        self._base_context = base_context
-        self._context_stack = []
-        self._target_logs = target_logs.value
-        self._logger = logger
-        self._max_log_field_size = max_log_field_size
-        self._max_log_dict_size = max_log_dict_size
-        self._max_log_traceback_lines = max_log_traceback_lines
-
-
-    @contextmanager
-    def context(self, context):
-        self.push_context(context)
-        try:
-            yield
-        finally:
-            self.pop_context()
-
-    def push_context(self, context):
-        self._context_stack.append(context)
-
-    def pop_context(self):
-        if self._context_stack:
-            self._context_stack.pop()
-
-    @property
-    def current_context(self):
-        return " >> ".join(self._context_stack)
-
-    @property
-    def base_context(self):
-        return self._base_context
-
-    @property
-    def id(self):
-        return self._id
-
-    @property
-    def systems_impacted(self):
-        return self._systems_impacted
-
-    @systems_impacted.setter
-    def systems_impacted(self, list_of_si: List[str]):
-        self._systems_impacted = list_of_si
-
-    def add_system_impacted(self, system_impacted: str):
-        if self._systems_impacted is None:
-            self._systems_impacted = []
-        self._systems_impacted.append(system_impacted)
-
-    def clear_systems_impacted(self):
-        self._systems_impacted = []
-
-    @property
-    def max_log_dict_size(self):
-        return self._max_log_dict_size
-
-    @max_log_dict_size.setter
-    def max_log_dict_size(self, value):
-        self._max_log_dict_size = value
-
-    @property
-    def max_log_field_size(self):
-        return self._max_log_field_size
-
-    @max_log_field_size.setter
-    def max_log_field_size(self, value):
-        self._max_log_field_size = value
-
-    @property
-    def max_log_traceback_lines(self):
-        return self._max_log_traceback_lines
-
-    @max_log_traceback_lines.setter
-    def max_log_traceback_lines(self, value):
-        self._max_log_traceback_lines = value
-
-    @property
-    def early_stop(self):
-        return self._early_stop
-
-    def set_early_stop(self, max_errors_tolerance:int=0, max_warnings_tolerance:int=0, create_error_log=True, pop_context=False):
-        self._early_stop = True
-        if create_error_log:
-            if pop_context:
-                self.pop_context()
-            if max_errors_tolerance > 0:
-                self.add_log(ContextLog(level=LogLevel.ERROR_PIPELINE_THRESHOLD_REACHED,
-                                        subject="EARLY_STOP",
-                                        description=f"Total MAX_ERRORS_TOLERANCE of {max_errors_tolerance} has been reached."))
-            elif max_warnings_tolerance > 0:
-                self.add_log(ContextLog(level=LogLevel.ERROR_PIPELINE_THRESHOLD_REACHED,
-                                        subject="EARLY_STOP",
-                                        description=f"Total MAX_WARNINGS_TOLERANCE of {max_warnings_tolerance} has been reached."))
-            else:
-                self.add_log(ContextLog(level=LogLevel.ERROR_PIPELINE_THRESHOLD_REACHED,
-                                        subject="EARLY_STOP",
-                                        description="Early stop has been triggered."))
-
-    def reset_early_stop(self):
-        self._early_stop = False
-
-
-    def add_log(self, log: ContextLog ):
-        if (self._target_logs == TargetLogs.SUCCESSES and log.level >=self.NOTICE_START_CODE) or \
-           (self._target_logs == TargetLogs.WARNINGS_AND_ERRORS and log.level.value < self.WARNING_START_CODE):
-            raise ValueError(f"Invalid log level {log.level.name} for Pipelinemon target logs setup: {self._target_logs}")
-        log.base_context = self.base_context
-        log.context = self.current_context if self.current_context else "root"
-        log.collector_id = self.id
-        log.systems_impacted = self.systems_impacted
-        log_dict = log.to_dict(max_field_len=self.max_log_field_size, size_limit=self.max_log_dict_size, max_traceback_lines=self.max_log_traceback_lines)
-        self._logs.append(log_dict)
-        self._update_counts(log_dict)
-
-        if self._logger:
-            # We specifically want to avoid having an ERROR log level for this structured Pipelinemon reporting, to ensure Errors are alerting on Critical Application Services.
-            # A single ERROR log level is usually added at the end of the entire pipeline
-            if log.level.value >= self.WARNING_START_CODE:
-                self._logger.warning(log_dict)
-            else:
-                self._logger.info(log_dict)
-
-    def add_logs(self, logs: List[ContextLog]):
-        for log in logs:
-            self.add_log(log)
-
-    def clear_logs_and_counts(self):
-        self._logs = []
-        self._errors_count = 0
-        self._warnings_count = 0
-        self._notices_count = 0
-        self._successes_count = 0
-        self._infos_count = 0
-        self._by_level_counts = {level.name: 0 for level in LogLevel}
-
-    def clear_logs(self):
-        self._logs = []
-
-    def get_all_logs(self,in_json_format=False):
-        if in_json_format:
-            return json.dumps(self._logs)
-        return self._logs
-
-    def get_logs_for_level(self, level: LogLevel):
-        return [log for log in self._logs if log["level_code"] == level.value]
-
-    def get_logs_by_str_in_context(self, context_substring: str):
-        return [
-            log for log in self._logs
-            if context_substring in log["context"]
-        ]
-
-    def contains_errors(self):
-        return self._errors_count > 0
-
-    def count_errors(self):
-        return self._errors_count
-
-    def contains_warnings_or_errors(self):
-        return self._warnings_count > 0 or self._errors_count > 0
-
-    def count_warnings_and_errors(self):
-        return self._warnings_count + self._errors_count
-
-    def count_warnings(self):
-        return self._warnings_count
-
-    def count_notices(self):
-        return self._notices_count
-
-    def count_successes(self):
-        return self._successes_count
-
-    def count_successes_with_notice(self):
-        return self.count_logs_by_level(LogLevel.SUCCESS_WITH_NOTICES)
-
-    def count_successes_no_notice(self):
-        return self.count_logs_by_level(LogLevel.SUCCESS)
-
-    def count_infos(self):
-        return self._infos_count
-
-    def count_all_logs(self):
-        return len(self._logs)
-
-    def count_logs_by_level(self, level: LogLevel):
-        return self._by_level_counts.get(level.name, 0)
-
-    def _count_logs(self, context_substring: str, exact_match=False, level_code_min=None, level_code_max=None):
-        return sum(
-            1 for log in self._logs
-            if (log["context"] == context_substring if exact_match else context_substring in log["context"]) and
-            (level_code_min is None or log["level_code"] >= level_code_min) and
-            (level_code_max is None or log["level_code"] <= level_code_max)
-        )
-
-    def count_logs_for_current_context(self):
-        return self._count_logs(self.current_context, exact_match=True)
-
-    def count_logs_for_current_and_nested_contexts(self):
-        return self._count_logs(self.current_context)
-
-    def count_logs_by_level_for_current_context(self, level: LogLevel):
-        return self._count_logs(self.current_context, exact_match=True, level_code_min=level.value, level_code_max=level.value)
-
-    def count_logs_by_level_for_current_and_nested_contexts(self, level: LogLevel):
-        return self._count_logs(self.current_context, level_code_min=level.value, level_code_max=level.value)
-
-    def count_errors_for_current_context(self):
-        return self._count_logs(self.current_context, exact_match=True, level_code_min=self.ERROR_START_CODE)
-
-    def count_errors_for_current_and_nested_contexts(self):
-        return self._count_logs(self.current_context, level_code_min=self.ERROR_START_CODE)
-
-    def count_warnings_and_errors_for_current_context(self):
-        return self._count_logs(self.current_context, exact_match=True, level_code_min=self.WARNING_START_CODE)
-
-    def count_warnings_and_errors_for_current_and_nested_contexts(self):
-        return self._count_logs(self.current_context, level_code_min=self.WARNING_START_CODE)
-
-    def count_warnings_for_current_context(self):
-        return self._count_logs(self.current_context, exact_match=True, level_code_min=self.WARNING_START_CODE, level_code_max=self.ERROR_START_CODE - 1)
-
-    def count_warnings_for_current_and_nested_contexts(self):
-        return self._count_logs(self.current_context, level_code_min=self.WARNING_START_CODE, level_code_max=self.ERROR_START_CODE - 1)
-
-    def count_notices_for_current_context(self):
-        return self._count_logs(self.current_context, exact_match=True, level_code_min=self.NOTICE_START_CODE, level_code_max=self.WARNING_START_CODE-1)
-
-    def count_notices_for_current_and_nested_contexts(self):
-        return self._count_logs(self.current_context, level_code_min=self.NOTICE_START_CODE, level_code_max=self.WARNING_START_CODE-1)
-
-    def count_successes_for_current_context(self):
-        return self._count_logs(self.current_context, exact_match=True, level_code_min=self.SUCCESS_START_CODE, level_code_max=self.NOTICE_START_CODE-1)
-
-    def count_successes_for_current_and_nested_contexts(self):
-        return self._count_logs(self.current_context, level_code_min=self.SUCCESS_START_CODE, level_code_max=self.NOTICE_START_CODE-1)
-
-    def count_infos_for_current_context(self):
-        return self._count_logs(self.current_context, exact_match=True, level_code_min=self.INFO_START_CODE, level_code_max=self.SUCCESS_START_CODE-1)
-
-    def count_infos_for_current_and_nested_contexts(self):
-        return self._count_logs(self.current_context, level_code_min=self.INFO_START_CODE, level_code_max=self.SUCCESS_START_CODE-1)
-
-    def _update_counts(self, log, remove=False):
-        level_code = log["level_code"]
-        level_name = log["level_name"]
-
-        if remove:
-            if level_code >= self.ERROR_START_CODE:
-                self._errors_count -= 1
-            elif self.WARNING_START_CODE <= level_code < self.ERROR_START_CODE:
-                self._warnings_count -= 1
-            elif self.NOTICE_START_CODE <= level_code < self.WARNING_START_CODE:
-                self._notices_count -= 1
-            elif self.SUCCESS_START_CODE <= level_code < self.NOTICE_START_CODE:
-                self._successes_count -= 1
-            elif self.INFO_START_CODE <= level_code < self.SUCCESS_START_CODE:
-                self._infos_count -= 1
-            self._by_level_counts[level_name] -= 1
-        else:
-            if level_code >= self.ERROR_START_CODE:
-                self._errors_count += 1
-            elif self.WARNING_START_CODE <= level_code < self.ERROR_START_CODE:
-                self._warnings_count += 1
-            elif self.NOTICE_START_CODE <= level_code < self.WARNING_START_CODE:
-                self._notices_count += 1
-            elif self.SUCCESS_START_CODE <= level_code < self.NOTICE_START_CODE:
-                self._successes_count += 1
-            elif self.INFO_START_CODE <= level_code < self.SUCCESS_START_CODE:
-                self._infos_count += 1
-            self._by_level_counts[level_name] += 1
-
-    def generate_file_name(self, file_prefix=None, include_base_context=True):
-        if not file_prefix:
-            file_prefix = self._target_logs
-        timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
-        if include_base_context:
-            file_name = f"{file_prefix}_{timestamp}_{self.base_context}_len{len(self._logs)}.json"
-        else:
-            file_name = f"{file_prefix}_{timestamp}_len{len(self._logs)}.json"
-
-        return file_name
-
-    def import_logs_from_json(self, json_or_file, logger=None):
-        def log_message(message):
-            if logger:
-                logger.info(message)
-
-        def log_warning(message, exc_info=False):
-            if logger:
-                logger.warning(message, exc_info=exc_info)
-
-        try:
-            if isinstance(json_or_file, str): # Load from string
-                imported_logs = json.loads(json_or_file)
-            elif hasattr(json_or_file, 'read'): # Load from file-like object
-                imported_logs = json.load(json_or_file)
-            self.add_logs(imported_logs)
-            log_message("Successfully imported logs from json.")
-        except Exception as e:
-            log_warning(f"Failed to import logs from json: {type(e).__name__} - {str(e)}", exc_info=True)
-
-
-    def generate_final_log_message(self, subjectref: str, total_subjs: int) -> str:
-        return f"""
-Pipeline for {self.base_context} with pipelinemon.id: {self.id},
-SUMMARY: Early_Stop= {self.early_stop} ;
-A)SUCCESSES: {self.count_successes()}/{total_subjs} {subjectref}(s) ; out of which SUCCESSES_WITH_NOTICES: {self.count_successes_with_notice()}/{total_subjs} {subjectref}(s) ;
-B)NOTICES: {self.count_notices()} ;
-C)WARNINGS: {self.count_warnings()} ;
-D)ERRORS: {self.count_errors()} ;
-E)INFOS: {self.count_infos()} ;
-"""
-
-    def log_final_message(self, subjectref: str, total_subjs: int, generallogger):
-        final_log_message = self.generate_final_log_message(subjectref=subjectref, total_subjs=total_subjs)
-        if self.count_warnings_and_errors() > 0:
-            generallogger.error(final_log_message)
-        else:
-            generallogger.info(final_log_message)
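
A minimal usage sketch of the Pipelinemon collector removed in 2.8.1, reconstructed from the deleted module above. The import paths follow the 2.7.1 layout shown in this diff; the base context, subject names and tolerance values are illustrative, not taken from the package.

import logging
from ipulse_shared_core_ftredge.enums import LogLevel, TargetLogs
from ipulse_shared_core_ftredge.utils.logs import ContextLog
from ipulse_shared_core_ftredge.utils.utils_collector_pipelinemon import Pipelinemon

logger = logging.getLogger("pipeline")
pipelinemon = Pipelinemon(base_context="daily_ingest",  # illustrative context name
                          logger=logger, target_logs=TargetLogs.MIXED)

with pipelinemon.context("fetch_prices"):
    # Each log is stamped with the current context stack and the collector id,
    # then mirrored to the logger at INFO (or WARNING for warning+ levels).
    pipelinemon.add_log(ContextLog(level=LogLevel.SUCCESS,
                                   subject="AAPL",
                                   description="Fetched and stored daily bars."))

if pipelinemon.count_errors() > 3:  # illustrative tolerance
    pipelinemon.set_early_stop(max_errors_tolerance=3)

# One summary entry; logged at ERROR if any warnings or errors were collected.
pipelinemon.log_final_message(subjectref="ticker", total_subjs=1, generallogger=logger)
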
ipulse_shared_core_ftredge/utils/utils_templates_and_schemas.py
DELETED
@@ -1,151 +0,0 @@
-# pylint: disable=missing-module-docstring
-# pylint: disable=missing-function-docstring
-# pylint: disable=logging-fstring-interpolation
-# pylint: disable=line-too-long
-# pylint: disable=broad-exception-caught
-
-import datetime
-from ipulse_shared_core_ftredge.enums import LogLevel
-from ipulse_shared_core_ftredge.utils.logs import ContextLog
-
-
-def check_format_against_schema_template(data_to_check, schema, dt_ts_to_str=True, check_max_length=True):
-    """Ensure Update dict corresponds to the config schema, ensuring proper formats and lengths."""
-    checked_data = {}
-    warnings_or_error = [] # Group warnings and errors for a given run
-
-    try:
-        # Process updates to conform to the schema
-        for field in schema:
-            field_name = field["name"]
-            field_type = field["type"]
-            mode = field["mode"]
-
-            # Initialize notice to None at the start of each field processing
-            warning = None
-
-            if field_name in data_to_check:
-                value = data_to_check[field_name]
-                if value is None:
-                    if mode == "REQUIRED":
-                        warnings_or_error.append(ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
-                                                            subject=field_name,
-                                                            description=f"Required field '{field_name}' is missing in the updates."))
-                    continue
-                else:
-                    # Handle date and timestamp formatting
-                    if field_type == "DATE":
-                        value, warning = handle_date_fields(field_name, value, dt_ts_to_str)
-                    elif field_type == "TIMESTAMP":
-                        value, warning = handle_timestamp_fields(field_name, value, dt_ts_to_str)
-                    elif field_type in ["STRING", "INT64", "FLOAT64", "BOOL"]:
-                        value, warning = handle_type_conversion(field_type, field_name, value)
-
-                    if warning:
-                        warnings_or_error.append(warning)
-
-                    # Check and handle max length restriction
-                    if check_max_length and "max_length" in field:
-                        value, warning = check_and_truncate_length(field_name, value, field["max_length"])
-                        if warning:
-                            warnings_or_error.append(warning)
-
-                    # Only add to the dictionary if value is not None or the field is required
-                    checked_data[field_name] = value
-
-            elif mode == "REQUIRED":
-                warnings_or_error.append(ContextLog(level=LogLevel.WARNING,
-                                                    subject=field_name,
-                                                    description=f"Required field '{field_name}' is missing in the updates."))
-
-    except Exception as e:
-        warnings_or_error.append(ContextLog(level=LogLevel.ERROR_EXCEPTION,
-                                            e=e,
-                                            subject=data_to_check,
-                                            description=f"An error occurred during update check: {str(e)}"))
-
-    return checked_data, warnings_or_error
-
-
-
-def handle_date_fields(field_name, value, dt_ts_to_str):
-    """Handles date fields, ensuring they are in the correct format and optionally converts them to string."""
-    if isinstance(value, datetime.date):
-        if dt_ts_to_str:
-            return value.strftime("%Y-%m-%d"), None
-        return value, None
-    elif isinstance(value, str):
-        try:
-            parsed_date = datetime.datetime.strptime(value, "%Y-%m-%d").date()
-            if dt_ts_to_str:
-                return value, None
-            return parsed_date, None
-        except ValueError:
-            return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
-                                    subject=field_name,
-                                    description=f"Expected a DATE in YYYY-MM-DD format but got {value}.")
-    else:
-        return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
-                                subject=field_name,
-                                description=f"Expected a DATE or YYYY-MM-DD str format but got {value} of type {type(value).__name__}.")
-
-
-def handle_timestamp_fields(field_name, value, dt_ts_to_str):
-    """Handles timestamp fields, ensuring they are in the correct format and optionally converts them to ISO format string."""
-    if isinstance(value, datetime.datetime):
-        if dt_ts_to_str:
-            return value.isoformat(), None
-        return value, None
-    elif isinstance(value, str):
-        try:
-            parsed_datetime = datetime.datetime.fromisoformat(value)
-            if dt_ts_to_str:
-                return value, None
-            return parsed_datetime, None
-        except ValueError:
-            return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
-                                    subject=field_name,
-                                    description=f"Expected ISO format TIMESTAMP but got {value}.")
-    else:
-        return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
-                                subject=field_name,
-                                description=f"Expected ISO format TIMESTAMP but got {value} of type {type(value).__name__}.")
-
-
-def check_and_truncate_length(field_name, value, max_length):
-    """Checks and truncates the length of string fields if they exceed the max length."""
-    if isinstance(value, str) and len(value) > max_length:
-        return value[:max_length], ContextLog(level=LogLevel.WARNING_FIX_RECOMMENDED,
-                                              subject=field_name,
-                                              description=f"Field exceeds max length: {len(value)}/{max_length}. Truncating.")
-
-    return value, None
-
-
-
-def handle_type_conversion(field_type, field_name, value):
-    if field_type == "STRING" and not isinstance(value, str):
-        return str(value), ContextLog(level=LogLevel.WARNING_REVIEW_RECOMMENDED,
-                                      subject=field_name,
-                                      description=f"Expected STRING but got {value} of type {type(value).__name__}.")
-
-    if field_type == "INT64" and not isinstance(value, int):
-        try:
-            return int(value), None
-        except ValueError:
-            return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
-                                    subject=field_name,
-                                    description=f"Expected INTEGER, but got {value} of type {type(value).__name__}.")
-    if field_type == "FLOAT64" and not isinstance(value, float):
-        try:
-            return float(value), None
-        except ValueError:
-            return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
-                                    subject=field_name,
-                                    description=f"Expected FLOAT, but got {value} of type {type(value).__name__}.")
-    if field_type == "BOOL" and not isinstance(value, bool):
-        return bool(value), ContextLog(level=LogLevel.WARNING_REVIEW_RECOMMENDED,
-                                       subject=field_name,
-                                       description=f"Expected BOOL, but got {value}. Converting as {bool(value)}.")
-
-    return value, None
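
A hedged sketch of calling the removed check_format_against_schema_template. The field keys (name, type, mode, max_length) and the coercion behaviour come from the deleted code above; the concrete schema and values are illustrative.

import datetime
from ipulse_shared_core_ftredge.utils.utils_templates_and_schemas import check_format_against_schema_template

schema = [
    {"name": "ticker", "type": "STRING", "mode": "REQUIRED", "max_length": 8},
    {"name": "as_of", "type": "DATE", "mode": "REQUIRED"},
    {"name": "close_price", "type": "FLOAT64", "mode": "NULLABLE"},
]
updates = {
    "ticker": "AAPL",
    "as_of": datetime.date(2024, 5, 1),  # serialized to "2024-05-01" since dt_ts_to_str=True
    "close_price": "189.98",             # coerced to 189.98 with no warning
}

checked_data, issues = check_format_against_schema_template(updates, schema)
# checked_data == {"ticker": "AAPL", "as_of": "2024-05-01", "close_price": 189.98}
# issues is a list of ContextLog entries; empty for this well-formed input
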
ipulse_shared_core_ftredge-2.7.1.dist-info/RECORD
DELETED
@@ -1,33 +0,0 @@
-ipulse_shared_core_ftredge/__init__.py,sha256=0tmR-zLphsZP6z3uQkF9uMVtry5gYPjiYYzddI_IznM,1222
-ipulse_shared_core_ftredge/enums/__init__.py,sha256=NDv8Vy6_XXRjo0lpTA8SS00KAhb0OtQKJKAvrJhRoK8,1307
-ipulse_shared_core_ftredge/enums/enums_common_utils.py,sha256=O-GE1177xD5tdo7qlKkq38ZoQmnc89uc90KWSDa0FhE,3626
-ipulse_shared_core_ftredge/enums/enums_data_eng.py,sha256=LA4BNTReDXtmLBNQy1z0r4ECOI1LDk2i9MK4XX08ZuU,9960
-ipulse_shared_core_ftredge/enums/enums_logging.py,sha256=N84hR427aJurD1P4XFS1BXuVY-M_rICFsZ03iXR88Zc,4775
-ipulse_shared_core_ftredge/enums/enums_module_fincore.py,sha256=o0sCVjRxr1G_6ZVmvu64VOVIFCkzvJhBOTz8IilkA2s,1621
-ipulse_shared_core_ftredge/enums/enums_modules.py,sha256=8dt6EVYpiyRJ8jaJxz9Wj7_H4ixeX1DK1j4Q0SBayL0,648
-ipulse_shared_core_ftredge/enums/enums_solution_providers.py,sha256=pSmM0JtzR2n0FBJ5Unec0gWMD5zGo6fHwdcCnmGL1oE,497
-ipulse_shared_core_ftredge/enums/pulse_enums.py,sha256=0RjIJbK0pt1Mzo4k_lhhxZL8myEUergQwOY9JOLZIJ4,4716
-ipulse_shared_core_ftredge/models/__init__.py,sha256=MeGH2ZBxkrwldUiWyUaI_TMyfq78tuSwRkN_mEfKD8U,161
-ipulse_shared_core_ftredge/models/organisation.py,sha256=22esRGYuJmKN3papkgozleEmDNJrVwUgIzKp7annvWs,3280
-ipulse_shared_core_ftredge/models/resource_catalog_item.py,sha256=mEGX8AftzrhEHqFVXjr62CuRnXC1vK4z3bHl_XBJodU,4964
-ipulse_shared_core_ftredge/models/user_auth.py,sha256=35HNN7ZW4ZELCqaJrAtoSsVLFAZ1KL2S_VmuzbcEMm4,119
-ipulse_shared_core_ftredge/models/user_profile.py,sha256=D3BB9D6XEv7IVZgsURgf0hWmUZW5rms3uiBXS0ZGLeE,1927
-ipulse_shared_core_ftredge/models/user_profile_update.py,sha256=oKK0XsQDKkgDvjFPhX2XlqEqlKLBQ4AkvPHXEuZbFMY,1712
-ipulse_shared_core_ftredge/models/user_status.py,sha256=8TyRd8tBK9_xb0MPKbI5pn9-lX7ovKbeiuWYYPtIOiw,3202
-ipulse_shared_core_ftredge/utils/__init__.py,sha256=yY4NX-W37xkTWeHOrR-Vni4FNCewaFJ7b9Beejyunyw,887
-ipulse_shared_core_ftredge/utils/utils_cloud.py,sha256=LEsp0L1KfsNqp9nkqxi3TXSuZxXQQenIvagnlJWYytc,2993
-ipulse_shared_core_ftredge/utils/utils_cloud_gcp.py,sha256=5A0qgzFQ2eMe_2uMX4Untxt0G49PX-g-o4XxKENpDQM,22156
-ipulse_shared_core_ftredge/utils/utils_cloud_gcp_with_collectors.py,sha256=Y6_qbZdiVU4kFW3Zk2CjudCUlF1Xrp4m_sLqN7rWCAE,11500
-ipulse_shared_core_ftredge/utils/utils_cloud_with_collectors.py,sha256=vDHt2FrSgj-qmt7Pip-lVbaov8tE3bfQVYKlZ3YcuUE,1612
-ipulse_shared_core_ftredge/utils/utils_collector_pipelinemon.py,sha256=M4Qi1gDxtkPclya64K9JRKWcMhbpcer6LPxDzgpUd04,14995
-ipulse_shared_core_ftredge/utils/utils_common.py,sha256=HTklVthhlMyewR5-qxo6HjMQRokXd-paruoKgENBEKc,8121
-ipulse_shared_core_ftredge/utils/utils_templates_and_schemas.py,sha256=pn2m8IVVjCPKKb9E_SqpUekcwaxyqI3AYn7HjhcVVhM,7489
-ipulse_shared_core_ftredge/utils/logs/__init__.py,sha256=fY6UNr197HYwUDi7uj5fsXw1Ma5gyAFzALxnoDSHFG0,71
-ipulse_shared_core_ftredge/utils/logs/audit_log_firestore.py,sha256=5AwO6NHuOncq65n400eqM8QPrS2EGGaP3Z_6l2rxdBE,261
-ipulse_shared_core_ftredge/utils/logs/context_log.py,sha256=AfDM_R4Xt0nueAMzkwLSPsembseXwSpvnGNv8tbqB1w,8615
-ipulse_shared_core_ftredge/utils/logs/get_logger.py,sha256=u4FTEeNU320s-LJ7-gdhWafWIdWPZsSZl2ymQoc7D3Q,4534
-ipulse_shared_core_ftredge-2.7.1.dist-info/LICENCE,sha256=YBtYAXNqCCOo9Mr2hfkbSPAM9CeAr2j1VZBSwQTrNwE,1060
-ipulse_shared_core_ftredge-2.7.1.dist-info/METADATA,sha256=_pDlpIXITli6boHmvHKLXg4LvrSVBmphutZcAVKHkU4,556
-ipulse_shared_core_ftredge-2.7.1.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
-ipulse_shared_core_ftredge-2.7.1.dist-info/top_level.txt,sha256=8sgYrptpexkA_6_HyGvho26cVFH9kmtGvaK8tHbsGHk,27
-ipulse_shared_core_ftredge-2.7.1.dist-info/RECORD,,
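
Each RECORD row has the form path,sha256=<digest>,<size>, where the digest is the urlsafe-base64 SHA-256 of the file with '=' padding stripped, per the wheel spec. A small sketch for recomputing such a digest; the file path is illustrative:

import base64
import hashlib

def record_digest(path: str) -> str:
    # RECORD stores the urlsafe-base64 SHA-256 digest with trailing '=' removed
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# For an unmodified 2.7.1 install, record_digest("ipulse_shared_core_ftredge/__init__.py")
# should reproduce the digest recorded above: 0tmR-zLphsZP6z3uQkF9uMVtry5gYPjiYYzddI_IznM
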
/ipulse_shared_core_ftredge/{utils/logs → logging}/audit_log_firestore.py
RENAMED
File without changes
{ipulse_shared_core_ftredge-2.7.1.dist-info → ipulse_shared_core_ftredge-2.8.1.dist-info}/LICENCE
RENAMED
File without changes
{ipulse_shared_core_ftredge-2.7.1.dist-info → ipulse_shared_core_ftredge-2.8.1.dist-info}/WHEEL
RENAMED
File without changes
{ipulse_shared_core_ftredge-2.7.1.dist-info → ipulse_shared_core_ftredge-2.8.1.dist-info}/top_level.txt
RENAMED
File without changes