ipulse-shared-core-ftredge 2.50__tar.gz → 2.51__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ipulse-shared-core-ftredge might be problematic. Click here for more details.

Files changed (32) hide show
  1. {ipulse_shared_core_ftredge-2.50/src/ipulse_shared_core_ftredge.egg-info → ipulse_shared_core_ftredge-2.51}/PKG-INFO +1 -1
  2. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/setup.py +1 -1
  3. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/__init__.py +2 -2
  4. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/enums/__init__.py +2 -1
  5. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/enums/enums_common_utils.py +26 -7
  6. ipulse_shared_core_ftredge-2.51/src/ipulse_shared_core_ftredge/utils_common.py +369 -0
  7. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/utils_gcp.py +5 -6
  8. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/utils_templates_and_schemas.py +21 -21
  9. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51/src/ipulse_shared_core_ftredge.egg-info}/PKG-INFO +1 -1
  10. ipulse_shared_core_ftredge-2.50/src/ipulse_shared_core_ftredge/utils_common.py +0 -415
  11. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/LICENCE +0 -0
  12. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/README.md +0 -0
  13. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/pyproject.toml +0 -0
  14. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/setup.cfg +0 -0
  15. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/enums/enums_data_eng.py +0 -0
  16. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/enums/enums_module_fincore.py +0 -0
  17. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/enums/enums_modules.py +0 -0
  18. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/models/__init__.py +0 -0
  19. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/models/audit_log_firestore.py +0 -0
  20. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/models/organisation.py +0 -0
  21. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/models/pulse_enums.py +0 -0
  22. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/models/resource_catalog_item.py +0 -0
  23. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/models/user_auth.py +0 -0
  24. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/models/user_profile.py +0 -0
  25. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/models/user_profile_update.py +0 -0
  26. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/models/user_status.py +0 -0
  27. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/tests/__init__.py +0 -0
  28. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge/tests/test.py +0 -0
  29. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge.egg-info/SOURCES.txt +0 -0
  30. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge.egg-info/dependency_links.txt +0 -0
  31. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge.egg-info/requires.txt +0 -0
  32. {ipulse_shared_core_ftredge-2.50 → ipulse_shared_core_ftredge-2.51}/src/ipulse_shared_core_ftredge.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: ipulse_shared_core_ftredge
3
- Version: 2.50
3
+ Version: 2.51
4
4
  Summary: Shared Core models and Logger util for the Pulse platform project. Using AI for financial advisory and investment management.
5
5
  Home-page: https://github.com/TheFutureEdge/ipulse_shared_core
6
6
  Author: Russlan Ramdowar
@@ -3,7 +3,7 @@ from setuptools import setup, find_packages
3
3
 
4
4
  setup(
5
5
  name='ipulse_shared_core_ftredge',
6
- version='2.50',
6
+ version='2.51',
7
7
  package_dir={'': 'src'}, # Specify the source directory
8
8
  packages=find_packages(where='src'), # Look for packages in 'src'
9
9
  install_requires=[
@@ -5,9 +5,9 @@ from .utils_gcp import (setup_gcp_logger_and_error_report,
5
5
  write_csv_to_gcs, write_data_to_gcs)
6
6
  from .utils_templates_and_schemas import (create_bigquery_schema_from_json,
7
7
  update_check_with_schema_template)
8
- from .utils_common import (Notice, NoticeSeverity, NoticesManager,SuccessLog, SuccessLogManager)
8
+ from .utils_common import (Notice, NoticesManager)
9
9
 
10
- from .enums import (NoticeSeverity, Unit, Frequency,
10
+ from .enums import (NoticeManagerCategory, NoticeLevel, Unit, Frequency,
11
11
  Module, SubModule, BaseDataCategory,
12
12
  FinCoreCategory, FincCoreSubCategory,
13
13
  FinCoreRecordsCategory, ExchangeOrPublisher,
@@ -3,7 +3,8 @@
3
3
  # pylint: disable=missing-function-docstring
4
4
  # pylint: disable=missing-class-docstring
5
5
 
6
- from .enums_common_utils import (NoticeSeverity,
6
+ from .enums_common_utils import (NoticeLevel,
7
+ NoticeManagerCategory,
7
8
  Unit,
8
9
  Frequency)
9
10
 
@@ -5,27 +5,35 @@
5
5
 
6
6
  from enum import Enum
7
7
 
8
- class NoticeSeverity(Enum):
8
+
9
+ class NoticeManagerCategory(Enum):
10
+ NOTICES = "notices"
11
+ WARN_ERRS = "warn_errs"
12
+ SUCCESSES = "successes"
13
+ class NoticeLevel(Enum):
9
14
  """
10
- Standardized logging levels for data engineering pipelines,
15
+ Standardized notice levels for data engineering pipelines,
11
16
  designed for easy analysis and identification of manual
12
17
  intervention needs.
13
18
  """
14
19
  DEBUG = 100 # Detailed debug information (for development/troubleshooting)
15
- INFO = 200 # Normal pipeline execution information
16
- NOTICE = 300 # Events requiring attention, but not necessarily errors
20
+
21
+ INFO = 200
22
+
23
+ SUCCESS = 300 # Events requiring attention, but not necessarily errors
17
24
 
18
25
  # Warnings indicate potential issues that might require attention:
26
+ WARNING = 400 # General warning, no immediate action required
19
27
  WARNING_NO_ACTION = 401 # Minor issue or Unexpected Behavior, no immediate action required (can be logged frequently)
20
28
  WARNING_REVIEW_RECOMMENDED = 402 # Action recommended to prevent potential future issues
21
29
  WARNING_FIX_RECOMMENDED = 403 # Action recommended to prevent potential future issues
22
30
  WARNING_FIX_REQUIRED = 404 # Action required, pipeline can likely continue
23
31
 
32
+ ERROR = 500 # General error, no immediate action required
24
33
  # Errors indicate a problem that disrupts normal pipeline execution:
25
- ERROR_EXCEPTION_REDO = 502
26
- ERROR_CUSTOM_REDO = 503 # Temporary error, automatic retry likely to succeed
34
+ ERROR_EXCEPTION_REDO = 501
35
+ ERROR_CUSTOM_REDO = 502 # Temporary error, automatic retry likely to succeed
27
36
 
28
-
29
37
  ERROR_EXCEPTION_INVESTIGATE = 601 # Exception occured after some data was likely persisted (e.g., to GCS or BQ)
30
38
  ERROR_CUSTOM_INVESTIGATE= 602
31
39
  ERROR_EXCEPTION_PERSTISTANCE = 603 # Exception occured after data was persisted (e.g., to GCS or BQ)
@@ -37,6 +45,17 @@ class NoticeSeverity(Enum):
37
45
 
38
46
  UNKNOWN=1001 # Unknown error, should not be used in normal operation
39
47
 
48
+
49
+ class NoticeStatus(Enum):
50
+ OPEN = "open"
51
+ ACKNOWLEDGED = "acknowledged"
52
+ IN_PROGRESS = "in_progress"
53
+ RESOLVED = "resolved"
54
+ IGNORED = "ignored"
55
+
56
+
57
+
58
+
40
59
  ### Exception during full exection, partially saved
41
60
  # Exception during ensemble pipeline; modifications collected in local object , nothing persisted
42
61
  # Exception during ensemble pipeline; modifications persisted , metadata failed
@@ -0,0 +1,369 @@
1
+ # pylint: disable=missing-module-docstring
2
+ # pylint: disable=missing-function-docstring
3
+ # pylint: disable=logging-fstring-interpolation
4
+ # pylint: disable=line-too-long
5
+ # pylint: disable=missing-class-docstring
6
+ import traceback
7
+ import json
8
+ import uuid
9
+ from datetime import datetime, timezone
10
+ from contextlib import contextmanager
11
+ from typing import List
12
+ from google.cloud import logging as cloudlogging
13
+ from ipulse_shared_core_ftredge.enums.enums_common_utils import NoticeLevel, NoticeManagerCategory, NoticeStatus
14
+ from ipulse_shared_core_ftredge.utils_gcp import write_data_to_gcs
15
+
16
+
17
+ # ["data_import","data_quality", "data_processing","data_general","data_persistance","metadata_quality", "metadata_processing", "metadata_persistance","metadata_general"]
18
+
19
class Notice:
    """A single structured notice (event/warning/error) captured during pipeline execution.

    Holds a severity level, optional subject/description, the context in which
    the notice was created, and optional exception details (type, message and
    a compacted traceback). ``to_dict()`` produces a JSON-serialisable record.
    """

    # Maximum number of traceback lines to retain after filtering/truncation.
    MAX_TRACEBACK_LINES = 14

    def __init__(self, level: NoticeLevel, start_context: str = None, notice_manager_id: str = None,
                 e: Exception = None, e_type: str = None, e_message: str = None, e_traceback: str = None,
                 subject: str = None, description: str = None, context: str = None,
                 notice_status: NoticeStatus = NoticeStatus.OPEN):
        # Derive exception details from the exception object when provided;
        # explicitly passed e_type/e_message/e_traceback take precedence.
        if e is not None:
            e_type = type(e).__name__ if e_type is None else e_type
            e_message = str(e) if e_message is None else e_message
            e_traceback = traceback.format_exc() if e_traceback is None else e_traceback
        elif e_traceback is None and (e_type or e_message):
            # Partial exception info supplied: capture the current traceback.
            e_traceback = traceback.format_exc()

        self.level = level
        self.subject = subject
        self.description = description
        self._start_context = start_context
        self._context = context
        self.notice_manager_id = notice_manager_id
        self.exception_type = e_type
        self.exception_message = e_message
        self.exception_traceback = self._format_traceback(e_traceback, e_message)
        self.notice_status = notice_status
        self.timestamp = datetime.now(timezone.utc).isoformat()

    def _format_traceback(self, e_traceback, e_message):
        """Return a compacted traceback string, or None when nothing useful exists.

        Drops lines duplicating the exception message, filters third-party
        (site-packages) frames, merges stray bracket-only lines into their
        predecessor, and truncates the middle when the result exceeds
        MAX_TRACEBACK_LINES.
        """
        # BUGFIX: traceback.format_exc() returns 'NoneType: None\n' when no
        # exception is active; treat that sentinel as "no traceback" too
        # (previously only the literal 'None\n' was filtered).
        if not e_traceback or e_traceback in ('None\n', 'NoneType: None\n'):
            return None

        traceback_lines = e_traceback.splitlines()

        # Remove lines that merely repeat the exception message.
        message_lines = e_message.splitlines() if e_message else []
        for message_line in message_lines:
            if message_line in traceback_lines:
                traceback_lines.remove(message_line)

        # Prefer frames from our own code over third-party library frames.
        filtered_lines = [line for line in traceback_lines if "site-packages" not in line]

        # If filtering leaves almost nothing, keep the full traceback instead.
        if len(filtered_lines) < 2:
            filtered_lines = traceback_lines

        # Merge standalone bracket lines into the preceding line.
        combined_lines = []
        for line in filtered_lines:
            if line.strip() in {"(", ")", "{", "}", "[", "]"} and combined_lines:
                combined_lines[-1] += " " + line.strip()
            else:
                combined_lines.append(line)

        # Keep the head and tail of an over-long traceback, marking the cut.
        keep_lines_start = min(self.MAX_TRACEBACK_LINES // 2, len(combined_lines))
        keep_lines_end = min(self.MAX_TRACEBACK_LINES // 2, len(combined_lines) - keep_lines_start)

        if len(combined_lines) > self.MAX_TRACEBACK_LINES:
            return '\n'.join(
                combined_lines[:keep_lines_start] +
                ['... (truncated) ...'] +
                combined_lines[-keep_lines_end:]
            )
        return '\n'.join(combined_lines)

    @property
    def start_context(self):
        # Context of the top-level operation that produced this notice.
        return self._start_context

    @start_context.setter
    def start_context(self, value):
        self._start_context = value

    @property
    def context(self):
        # Nested context (" >> "-joined by the manager) at creation time.
        return self._context

    @context.setter
    def context(self, value):
        self._context = value

    def to_dict(self):
        """Return a JSON-serialisable dict representation of this notice."""
        return {
            "start_context": self.start_context,
            "context": self.context,
            "level_code": self.level.value,
            "level_name": self.level.name,
            "subject": self.subject,
            "description": self.description,
            "exception_type": self.exception_type,
            "exception_message": self.exception_message,
            "exception_traceback": self.exception_traceback,
            "notice_status": self.notice_status.value,
            "notice_manager_id": self.notice_manager_id,
            "timestamp": self.timestamp
        }
119
+
120
class NoticesManager:
    """Accumulates Notice records for a pipeline run.

    Tracks per-level counters, maintains a nested context stack used to tag
    each notice with where it occurred, can flag an early stop once an error
    budget is exhausted, and can export/import the collected notices as JSON
    (optionally to GCS). When a logger name is supplied, every notice is also
    mirrored to Google Cloud Logging.
    """

    # Level-code thresholds used to bucket notices into error/warning/success.
    ERROR_CODE_START_VALUE = NoticeLevel.ERROR.value
    WARNING_CODE_START_VALUE = NoticeLevel.WARNING.value
    SUCCESS_CODE_START_VALUE = NoticeLevel.SUCCESS.value

    def __init__(self, start_context: str, category: NoticeManagerCategory = NoticeManagerCategory.NOTICES, logger_name=None):
        self._notice_manager_id = str(uuid.uuid4())
        self._notices = []  # list of notice dicts (Notice.to_dict() output)
        self._early_stop = False
        self._error_count = 0
        self._warning_count = 0
        self._success_count = 0
        self._level_counts = {level.name: 0 for level in NoticeLevel}
        self._start_context = start_context
        self._context_stack = []
        self._category = category.value
        self._logger = self._initialize_logger(logger_name)

    def _initialize_logger(self, logger_name):
        """Create a Cloud Logging logger when a name is given, else None."""
        if logger_name:
            logging_client = cloudlogging.Client()
            return logging_client.logger(logger_name)
        return None

    @contextmanager
    def context(self, context):
        """Push *context* onto the stack for the duration of the with-block."""
        self.push_context(context)
        try:
            yield
        finally:
            self.pop_context()

    def push_context(self, context):
        self._context_stack.append(context)

    def pop_context(self):
        if self._context_stack:
            self._context_stack.pop()

    def get_current_context(self):
        """Return the nested context as a ' >> '-joined string."""
        return " >> ".join(self._context_stack)

    def get_start_context(self):
        return self._start_context

    def get_notice_manager_id(self):
        return self._notice_manager_id

    def set_early_stop(self, max_errors_tolerance: int, create_error_notice=True, pop_context=False):
        """Flag the run for early stop, optionally recording an ERROR notice."""
        self._early_stop = True
        if create_error_notice:
            if pop_context:
                self.pop_context()
            self.add_notice(Notice(level=NoticeLevel.ERROR,
                                   subject="EARLY_STOP",
                                   description=f"Total MAX_ERRORS_TOLERANCE of {max_errors_tolerance} has been reached."))

    def reset_early_stop(self):
        self._early_stop = False

    def get_early_stop(self):
        return self._early_stop

    def add_notice(self, notice: Notice):
        """Stamp *notice* with context/manager id, store it and update counters.

        Raises ValueError when the notice level is not allowed for this
        manager's category (SUCCESSES accepts only SUCCESS; WARN_ERRS accepts
        only WARNING level and above).
        """
        if (self._category == NoticeManagerCategory.SUCCESSES.value and notice.level != NoticeLevel.SUCCESS) or \
           (self._category == NoticeManagerCategory.WARN_ERRS.value and notice.level.value < self.WARNING_CODE_START_VALUE):
            raise ValueError(f"Invalid notice level {notice.level.name} for category {self._category}")
        notice.start_context = self.get_start_context()
        notice.context = self.get_current_context()
        notice.notice_manager_id = self._notice_manager_id
        notice_dict = notice.to_dict()
        self._notices.append(notice_dict)
        self._update_counts(notice_dict)

        if self._logger:
            # NOTE(review): ERROR-level notices are also emitted with severity
            # "WARNING" here — confirm whether a distinct ERROR severity is wanted.
            if notice.level.value >= self.WARNING_CODE_START_VALUE:
                self._logger.log_struct(notice_dict, severity="WARNING")
            else:
                self._logger.log_struct(notice_dict, severity="INFO")

    def add_notices(self, notices: List[Notice]):
        for notice in notices:
            self.add_notice(notice)

    def clear_notices_and_counts(self):
        """Drop all stored notices and reset every counter to zero."""
        self._notices = []
        self._error_count = 0
        self._warning_count = 0
        self._success_count = 0
        self._level_counts = {level.name: 0 for level in NoticeLevel}

    def clear_notices(self):
        # NOTE(review): this clears the list but leaves the counters intact —
        # confirm that divergence is intended; otherwise use clear_notices_and_counts().
        self._notices = []

    def get_all_notices(self):
        return self._notices

    def get_notices_for_level(self, level: NoticeLevel):
        return [notice for notice in self._notices if notice["level_code"] == level.value]

    def get_notices_by_str_in_context(self, context_substring: str):
        return [
            notice for notice in self._notices
            if context_substring in notice["context"]
        ]

    def contains_errors(self):
        return self._error_count > 0

    def count_errors(self):
        return self._error_count

    def contains_warnings_or_errors(self):
        return self._warning_count > 0 or self._error_count > 0

    def count_warnings_and_errors(self):
        return self._warning_count + self._error_count

    def count_warnings(self):
        return self._warning_count

    def count_successes(self):
        return self._success_count

    def count_all_notices(self):
        return len(self._notices)

    def count_notices_by_level(self, level: NoticeLevel):
        return self._level_counts.get(level.name, 0)

    def _count_notices(self, context_substring: str, exact_match=False, level_code_min=None, level_code_max=None):
        """Count stored notices matching a context (exact or substring) and an
        optional inclusive level-code range."""
        return sum(
            1 for notice in self._notices
            if (notice["context"] == context_substring if exact_match else context_substring in notice["context"]) and
            (level_code_min is None or notice["level_code"] >= level_code_min) and
            (level_code_max is None or notice["level_code"] <= level_code_max)
        )

    def count_notices_for_current_context(self):
        return self._count_notices(self.get_current_context(), exact_match=True)

    def count_notices_for_current_and_nested_contexts(self):
        return self._count_notices(self.get_current_context())

    def count_notices_by_level_for_current_context(self, level: NoticeLevel):
        return self._count_notices(self.get_current_context(), exact_match=True, level_code_min=level.value, level_code_max=level.value)

    def count_notices_by_level_for_current_and_nested_contexts(self, level: NoticeLevel):
        return self._count_notices(self.get_current_context(), level_code_min=level.value, level_code_max=level.value)

    def count_errors_for_current_context(self):
        return self._count_notices(self.get_current_context(), exact_match=True, level_code_min=self.ERROR_CODE_START_VALUE)

    def count_errors_for_current_and_nested_contexts(self):
        return self._count_notices(self.get_current_context(), level_code_min=self.ERROR_CODE_START_VALUE)

    def count_warnings_and_errors_for_current_context(self):
        return self._count_notices(self.get_current_context(), exact_match=True, level_code_min=self.WARNING_CODE_START_VALUE)

    def count_warnings_and_errors_for_current_and_nested_contexts(self):
        return self._count_notices(self.get_current_context(), level_code_min=self.WARNING_CODE_START_VALUE)

    def count_warnings_for_current_context(self):
        return self._count_notices(self.get_current_context(), exact_match=True, level_code_min=self.WARNING_CODE_START_VALUE, level_code_max=self.ERROR_CODE_START_VALUE - 1)

    def count_warnings_for_current_and_nested_contexts(self):
        return self._count_notices(self.get_current_context(), level_code_min=self.WARNING_CODE_START_VALUE, level_code_max=self.ERROR_CODE_START_VALUE - 1)

    def count_successes_for_current_context(self):
        return self._count_notices(self.get_current_context(), exact_match=True, level_code_min=self.SUCCESS_CODE_START_VALUE, level_code_max=self.SUCCESS_CODE_START_VALUE)

    def count_successes_for_current_and_nested_contexts(self):
        return self._count_notices(self.get_current_context(), level_code_min=self.SUCCESS_CODE_START_VALUE, level_code_max=self.SUCCESS_CODE_START_VALUE)

    def export_notices_to_gcs_file(self, bucket_name, storage_client, file_prefix=None, file_name=None, top_level_context=None, save_locally=False, local_path=None, logger=None, max_retries=2):
        """Write all collected notices to GCS as one JSON file.

        Returns (cloud_path, local_path) as reported by write_data_to_gcs;
        both are None when the upload (and any local fallback) failed.
        """
        def log_message(message):
            if logger:
                logger.info(message)

        def log_error(message, exc_info=False):
            if logger:
                logger.error(message, exc_info=exc_info)

        if not file_prefix:
            file_prefix = self._category
        if not file_name:
            timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
            if top_level_context:
                file_name = f"{file_prefix}_{timestamp}_{top_level_context}_len{len(self._notices)}.json"
            else:
                file_name = f"{file_prefix}_{timestamp}_len{len(self._notices)}.json"

        # BUGFIX: the caller-supplied local_path was previously reset to None
        # just before the upload call, so a custom local save location was
        # always ignored. Track the resulting path in a separate variable.
        cloud_path = None
        result_local_path = None
        try:
            cloud_path, result_local_path = write_data_to_gcs(
                bucket_name=bucket_name,
                storage_client=storage_client,
                data=self._notices,
                file_name=file_name,
                save_locally=save_locally,
                local_path=local_path,
                logger=logger,
                max_retries=max_retries
            )
            log_message(f"{file_prefix} successfully saved to GCS at {cloud_path} and locally at {result_local_path}.")
        except Exception as e:
            log_error(f"Failed at export_notices_to_gcs_file for {file_prefix} for file {file_name} to bucket {bucket_name}: {type(e).__name__} - {str(e)}")

        return cloud_path, result_local_path

    def import_notices_from_json(self, json_or_file, logger=None):
        """Merge notices from a JSON string or file-like object into this manager.

        Expects a JSON list of notice dicts as produced by Notice.to_dict()
        / export_notices_to_gcs_file(). Failures are logged as warnings.
        """
        def log_message(message):
            if logger:
                logger.info(message)

        def log_warning(message, exc_info=False):
            if logger:
                logger.warning(message, exc_info=exc_info)

        try:
            if isinstance(json_or_file, str):  # Load from string
                imported_notices = json.loads(json_or_file)
            elif hasattr(json_or_file, 'read'):  # Load from file-like object
                imported_notices = json.load(json_or_file)
            else:
                raise TypeError(f"Expected JSON string or file-like object, got {type(json_or_file).__name__}")
            # BUGFIX: the imported items are plain dicts (Notice.to_dict()
            # output), not Notice objects, so routing them through
            # add_notice() would fail on attribute assignment. Append the
            # dicts directly and keep the counters in sync instead.
            for notice_dict in imported_notices:
                self._notices.append(notice_dict)
                self._update_counts(notice_dict)
            log_message("Successfully imported notices from json.")
        except Exception as e:
            log_warning(f"Failed to import notices from json: {type(e).__name__} - {str(e)}", exc_info=True)

    def _update_counts(self, notice, remove=False):
        """Adjust aggregate and per-level counters for one notice dict."""
        level_code = notice["level_code"]
        level_name = notice["level_name"]
        delta = -1 if remove else 1

        # BUGFIX: the add path previously used `== SUCCESS_CODE_START_VALUE`
        # while the remove path used `>=`; use one consistent threshold so
        # add/remove stay symmetric.
        if level_code >= self.ERROR_CODE_START_VALUE:
            self._error_count += delta
        elif level_code >= self.WARNING_CODE_START_VALUE:
            self._warning_count += delta
        elif level_code >= self.SUCCESS_CODE_START_VALUE:
            self._success_count += delta
        self._level_counts[level_name] += delta
@@ -7,7 +7,6 @@ from io import StringIO
7
7
  import logging
8
8
  import os
9
9
  import time
10
- from datetime import datetime, timezone
11
10
  import traceback
12
11
  from google.cloud import error_reporting, logging as cloud_logging
13
12
  from google.api_core.exceptions import NotFound
@@ -182,12 +181,12 @@ def write_data_to_gcs(bucket_name, storage_client, data, file_name=None,
182
181
  log_message(f"Successfully saved file to GCS {cloud_path}.")
183
182
  success = True
184
183
  except Exception as e:
185
- gcs_upload_exception = e
184
+ gcs_upload_exception = e
186
185
  attempts += 1
187
- log_error(f"Attempt {attempts} - Failed to write {file_name} "
188
- f"to GCS bucket '{bucket_name}': {e}") # Log with full traceback
189
186
  if attempts < max_retries:
190
187
  time.sleep(2 ** attempts)
188
+ else:
189
+ log_error(f"Failed to write {file_name} to GCS bucket {bucket_name} after {max_retries} attempts :{e}")
191
190
 
192
191
  if not success and (save_locally or local_path):
193
192
  try:
@@ -203,8 +202,8 @@ def write_data_to_gcs(bucket_name, storage_client, data, file_name=None,
203
202
 
204
203
  # If GCS upload failed, raise a single exception here
205
204
 
206
- if gcs_upload_exception:
207
- raise gcs_upload_exception from None # Propagate without nesting
205
+ if gcs_upload_exception is not None:
206
+ raise gcs_upload_exception # Propagate without nesting
208
207
 
209
208
  return cloud_path, local_path_final
210
209
 
@@ -5,7 +5,7 @@
5
5
 
6
6
  import datetime
7
7
  from google.cloud import bigquery
8
- from ipulse_shared_core_ftredge.enums.enums_common_utils import NoticeSeverity
8
+ from ipulse_shared_core_ftredge.enums.enums_common_utils import NoticeLevel
9
9
  from ipulse_shared_core_ftredge.utils_common import Notice
10
10
 
11
11
 
@@ -61,9 +61,9 @@ def update_check_with_schema_template(updates, schema, dt_ts_to_str=True, check_
61
61
  valid_updates[field_name] = value
62
62
 
63
63
  elif mode == "REQUIRED":
64
- notice=Notice(severity=NoticeSeverity.WARNING_FIX_REQUIRED,
64
+ notice=Notice(level=NoticeLevel.WARNING_FIX_REQUIRED,
65
65
  subject=field_name,
66
- message=f"Required field '{field_name}' is missing in the updates.")
66
+ description=f"Required field '{field_name}' is missing in the updates.")
67
67
 
68
68
  notices.append(notice)
69
69
 
@@ -82,13 +82,13 @@ def handle_date_fields(field_name, value, dt_ts_to_str):
82
82
  return value, None
83
83
  return parsed_date, None
84
84
  except ValueError:
85
- return None, Notice(severity=NoticeSeverity.WARNING_FIX_REQUIRED,
85
+ return None, Notice(level=NoticeLevel.WARNING_FIX_REQUIRED,
86
86
  subject=field_name,
87
- message=f"Expected a DATE in YYYY-MM-DD format but got {value}.")
87
+ description=f"Expected a DATE in YYYY-MM-DD format but got {value}.")
88
88
  else:
89
- return None, Notice(severity=NoticeSeverity.WARNING_FIX_REQUIRED,
89
+ return None, Notice(level=NoticeLevel.WARNING_FIX_REQUIRED,
90
90
  subject=field_name,
91
- message= f"Expected a DATE or YYYY-MM-DD str format but got {value} of type {type(value).__name__}.")
91
+ description= f"Expected a DATE or YYYY-MM-DD str format but got {value} of type {type(value).__name__}.")
92
92
 
93
93
 
94
94
  def handle_timestamp_fields(field_name, value, dt_ts_to_str):
@@ -104,21 +104,21 @@ def handle_timestamp_fields(field_name, value, dt_ts_to_str):
104
104
  return value, None
105
105
  return parsed_datetime, None
106
106
  except ValueError:
107
- return None, Notice(severity=NoticeSeverity.WARNING_FIX_REQUIRED,
107
+ return None, Notice(level=NoticeLevel.WARNING_FIX_REQUIRED,
108
108
  subject=field_name,
109
- message= f"Expected ISO format TIMESTAMP but got {value}.")
109
+ description= f"Expected ISO format TIMESTAMP but got {value}.")
110
110
  else:
111
- return None, Notice(severity=NoticeSeverity.WARNING_FIX_REQUIRED,
111
+ return None, Notice(level=NoticeLevel.WARNING_FIX_REQUIRED,
112
112
  subject=field_name,
113
- message= f"Expected ISO format TIMESTAMP but got {value} of type {type(value).__name__}.")
113
+ description= f"Expected ISO format TIMESTAMP but got {value} of type {type(value).__name__}.")
114
114
 
115
115
 
116
116
  def check_and_truncate_length(field_name, value, max_length):
117
117
  """Checks and truncates the length of string fields if they exceed the max length."""
118
118
  if isinstance(value, str) and len(value) > max_length:
119
- return value[:max_length], Notice(severity=NoticeSeverity.WARNING_FIX_RECOMMENDED,
119
+ return value[:max_length], Notice(level=NoticeLevel.WARNING_FIX_RECOMMENDED,
120
120
  subject= field_name,
121
- message= f"Field exceeds max length: {len(value)}/{max_length}. Truncating.")
121
+ description= f"Field exceeds max length: {len(value)}/{max_length}. Truncating.")
122
122
 
123
123
  return value, None
124
124
 
@@ -126,27 +126,27 @@ def check_and_truncate_length(field_name, value, max_length):
126
126
 
127
127
  def handle_type_conversion(field_type, field_name, value):
128
128
  if field_type == "STRING" and not isinstance(value, str):
129
- return str(value), Notice(severity=NoticeSeverity.WARNING_REVIEW_RECOMMENDED,
129
+ return str(value), Notice(level=NoticeLevel.WARNING_REVIEW_RECOMMENDED,
130
130
  subject=field_name,
131
- message= f"Expected STRING but got {value} of type {type(value).__name__}.")
131
+ description= f"Expected STRING but got {value} of type {type(value).__name__}.")
132
132
 
133
133
  if field_type == "INT64" and not isinstance(value, int):
134
134
  try:
135
135
  return int(value), None
136
136
  except ValueError:
137
- return None, Notice(severity=NoticeSeverity.WARNING_FIX_REQUIRED,
137
+ return None, Notice(level=NoticeLevel.WARNING_FIX_REQUIRED,
138
138
  subject= field_name,
139
- message=f"Expected INTEGER, but got {value} of type {type(value).__name__}.")
139
+ description=f"Expected INTEGER, but got {value} of type {type(value).__name__}.")
140
140
  if field_type == "FLOAT64" and not isinstance(value, float):
141
141
  try:
142
142
  return float(value), None
143
143
  except ValueError:
144
- return None, Notice(severity=NoticeSeverity.WARNING_FIX_REQUIRED,
144
+ return None, Notice(level=NoticeLevel.WARNING_FIX_REQUIRED,
145
145
  subject=field_name,
146
- message=f"Expected FLOAT, but got {value} of type {type(value).__name__}.")
146
+ description=f"Expected FLOAT, but got {value} of type {type(value).__name__}.")
147
147
  if field_type == "BOOL" and not isinstance(value, bool):
148
- return bool(value), Notice(severity=NoticeSeverity.WARNING_REVIEW_RECOMMENDED,
148
+ return bool(value), Notice(level=NoticeLevel.WARNING_REVIEW_RECOMMENDED,
149
149
  subject=field_name,
150
- message=f"Expected BOOL, but got {value}. Converting as {bool(value)}.")
150
+ description=f"Expected BOOL, but got {value}. Converting as {bool(value)}.")
151
151
 
152
152
  return value, None
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: ipulse_shared_core_ftredge
3
- Version: 2.50
3
+ Version: 2.51
4
4
  Summary: Shared Core models and Logger util for the Pulse platform project. Using AI for financial advisory and investment management.
5
5
  Home-page: https://github.com/TheFutureEdge/ipulse_shared_core
6
6
  Author: Russlan Ramdowar
@@ -1,415 +0,0 @@
1
- # pylint: disable=missing-module-docstring
2
- # pylint: disable=missing-function-docstring
3
- # pylint: disable=logging-fstring-interpolation
4
- # pylint: disable=line-too-long
5
- import traceback
6
- import json
7
- import os
8
- import time
9
- from datetime import datetime, timezone
10
- from contextlib import contextmanager
11
- from typing import List
12
- from ipulse_shared_core_ftredge.enums.enums_common_utils import NoticeSeverity
13
- from ipulse_shared_core_ftredge.utils_gcp import write_data_to_gcs
14
-
15
- def create_notice(severity, e=None, e_type=None, e_message=None, e_traceback=None, subject=None, message=None,context=None):
16
- # Validate input: ensure severity is provided, use a default if not
17
- if severity is None:
18
- severity = NoticeSeverity.UNKNOWN # Assume Severity.UNKNOWN is a default fallback
19
-
20
- # If an exception object is provided, use it to extract details
21
- if e is not None:
22
- e_type = type(e).__name__ if e_type is None else e_type
23
- e_message = str(e) if e_message is None else e_message
24
- e_traceback = traceback.format_exc() if e_traceback is None else e_traceback
25
- else:
26
- # Calculate traceback if not provided and if exception details are partially present
27
- if e_traceback is None and (e_type or e_message):
28
- e_traceback = traceback.format_exc()
29
-
30
- # Prepare the base notice dictionary with all fields
31
- notice = {
32
- "severity_code": severity.value,
33
- "severity_name": severity.name,
34
- "subject": subject,
35
- "message": message,
36
- "exception_code": e_type,
37
- "exception_message": e_message,
38
- "exception_traceback": e_traceback or None, # Ensure field is present even if traceback isn't calculated
39
- "context": context or ""
40
- }
41
- return notice
42
-
43
-
44
-
45
-
46
- def merge_notices_dicts(dict1, dict2):
47
- """
48
- Merge two dictionaries of lists, combining lists for overlapping keys.
49
-
50
- Parameters:
51
- dict1 (dict): The first dictionary of lists.
52
- dict2 (dict): The second dictionary of lists.
53
-
54
- Returns:
55
- dict: A new dictionary with combined lists for overlapping keys.
56
- """
57
- merged_dict = {}
58
-
59
- # Get all unique keys from both dictionaries
60
- all_keys = set(dict1) | set(dict2)
61
-
62
- for key in all_keys:
63
- # Combine lists from both dictionaries for each key
64
- merged_dict[key] = dict1.get(key, []) + dict2.get(key, [])
65
-
66
- return merged_dict
67
-
68
-
69
- # ["data_import","data_quality", "data_processing","data_general","data_persistance","metadata_quality", "metadata_processing", "metadata_persistance","metadata_general"]
70
-
71
- class Notice:
72
- def __init__(self, severity: NoticeSeverity, e: Exception = None, e_type: str = None, e_message: str = None, e_traceback: str = None, subject: str = None, message: str = None, context: str = None):
73
-
74
- # If an exception object is provided, use it to extract details
75
- if e is not None:
76
- e_type = type(e).__name__ if e_type is None else e_type
77
- e_message = str(e) if e_message is None else e_message
78
- e_traceback = traceback.format_exc() if e_traceback is None else e_traceback
79
- # If exception details are provided but not from an exception object
80
- elif e_traceback is None and (e_type or e_message):
81
- e_traceback = traceback.format_exc()
82
-
83
- self.timestamp = datetime.now(timezone.utc).isoformat()
84
- self.severity = severity
85
- self.subject = subject
86
- self.message = message
87
- self.context = context
88
- self.exception_type = e_type
89
- self.exception_message = e_message
90
- self.exception_traceback = e_traceback
91
-
92
- def to_dict(self):
93
- return {
94
- "context": self.context,
95
- "severity_code": self.severity.value,
96
- "severity_name": self.severity.name,
97
- "subject": self.subject,
98
- "message": self.message,
99
- "exception_type": self.exception_type,
100
- "exception_message": self.exception_message,
101
- "exception_traceback": self.exception_traceback,
102
- }
103
-
104
- class NoticesManager:
105
- ERROR_CODE_START_VALUE = 500
106
-
107
- def __init__(self):
108
- self.notices = []
109
- self.error_count = 0
110
- self.severity_counts = {severity.name: 0 for severity in NoticeSeverity}
111
- self.context_stack = []
112
-
113
- @contextmanager
114
- def notice_context(self, context):
115
- self.push_context(context)
116
- try:
117
- yield
118
- finally:
119
- self.pop_context()
120
-
121
- def push_context(self, context):
122
- self.context_stack.append(context)
123
-
124
- def pop_context(self):
125
- if self.context_stack:
126
- self.context_stack.pop()
127
-
128
- def get_notices_by_context(self, context_substring: str):
129
- return [
130
- notice for notice in self.notices
131
- if context_substring in notice["context"]
132
- ]
133
-
134
- def get_current_context(self):
135
- return " >> ".join(self.context_stack)
136
-
137
- def get_all_notices(self):
138
- return self.notices
139
- def add_notice(self, notice: Notice):
140
- notice.context = self.get_current_context()
141
- notice_dict = notice.to_dict()
142
- self.notices.append(notice_dict)
143
- self._update_counts(notice_dict)
144
-
145
- def add_notices(self, notices: List[Notice]):
146
- for notice in notices:
147
- notice.context = self.get_current_context()
148
- notice_dict = notice.to_dict()
149
- self.notices.append(notice_dict)
150
- self._update_counts(notice_dict)
151
-
152
- def remove_notice(self, notice: Notice):
153
- notice_dict = notice.to_dict()
154
- if notice_dict in self.notices:
155
- self.notices.remove(notice_dict)
156
- self._update_counts(notice_dict, remove=True)
157
-
158
- def clear_notices(self):
159
- self.notices = []
160
- self.error_count = 0
161
- self.severity_counts = {severity.name: 0 for severity in NoticeSeverity}
162
-
163
- def contains_errors(self):
164
- return self.error_count > 0
165
-
166
- def count_errors(self):
167
- return self.error_count
168
-
169
- def count_notices_by_severity(self, severity: NoticeSeverity):
170
- return self.severity_counts.get(severity.name, 0)
171
-
172
- def count_errors_for_current_context(self):
173
- current_context = self.get_current_context()
174
- return sum(
175
- 1 for notice in self.notices
176
- if notice["context"] == current_context and notice["severity_code"] >= self.ERROR_CODE_START_VALUE
177
- )
178
- def count_all_notices(self):
179
- return len(self.notices)
180
-
181
- def count_notices_for_current_context(self):
182
- current_context = self.get_current_context()
183
- return sum(
184
- 1 for notice in self.notices
185
- if notice["context"] == current_context
186
- )
187
-
188
- def count_notices_by_severity_for_current_context(self, severity: NoticeSeverity):
189
- current_context = self.get_current_context()
190
- return sum(
191
- 1 for notice in self.notices
192
- if notice["context"] == current_context and notice["severity_code"] == severity.value
193
- )
194
- def count_notices_for_current_and_nested_contexts(self):
195
- current_context = self.get_current_context()
196
- return sum(
197
- 1 for notice in self.notices
198
- if current_context in notice["context"]
199
- )
200
- def count_errors_for_current_and_nested_contexts(self):
201
- current_context = self.get_current_context()
202
- return sum(
203
- 1 for notice in self.notices
204
- if current_context in notice["context"] and notice["severity_code"] >= self.ERROR_CODE_START_VALUE
205
- )
206
- def count_notices_by_severity_for_current_and_nested_contexts(self, severity: NoticeSeverity):
207
- current_context = self.get_current_context()
208
- return sum(
209
- 1 for notice in self.notices
210
- if current_context in notice["context"] and notice["severity_code"] == severity.value
211
- )
212
-
213
- def export_notices_to_gcs_file(self, bucket_name, storage_client, file_name=None, top_level_context=None, save_locally=False, local_path=None, logger=None, max_retries=2):
214
- def log_message(message):
215
- if logger:
216
- logger.info(message)
217
-
218
- def log_error(message, exc_info=False):
219
- if logger:
220
- logger.error(message, exc_info=exc_info)
221
-
222
- if not file_name:
223
- timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
224
- if top_level_context:
225
- file_name = f"notices_{timestamp}_{top_level_context}_len{len(self.notices)}.json"
226
- else:
227
- file_name = f"notices_{timestamp}_len{len(self.notices)}.json"
228
-
229
- cloud_path = None # Initialize cloud_path here
230
- local_path = None # Initialize local_path here
231
- try:
232
- cloud_path, local_path = write_data_to_gcs(
233
- bucket_name=bucket_name,
234
- storage_client=storage_client,
235
- data=self.notices,
236
- file_name=file_name,
237
- save_locally=save_locally,
238
- local_path=local_path,
239
- logger=logger,
240
- max_retries=max_retries
241
- )
242
- log_message(f"Notices successfully saved to GCS at {cloud_path} and locally at {local_path}.")
243
- except Exception as e:
244
- log_error(f"Failed to export notices: {type(e).__name__} - {str(e)}", exc_info=True)
245
-
246
- return cloud_path , local_path
247
-
248
- def import_notices_from_json(self, json_or_file, logger=None):
249
- def log_message(message):
250
- if logger:
251
- logger.info(message)
252
- else:
253
- print(message)
254
-
255
- def log_error(message, exc_info=False):
256
- if logger:
257
- logger.error(message, exc_info=exc_info)
258
- else:
259
- print(message)
260
- try:
261
- if isinstance(json_or_file, str): # Load from string
262
- imported_notices = json.loads(json_or_file)
263
- elif hasattr(json_or_file, 'read'): # Load from file-like object
264
- imported_notices = json.load(json_or_file)
265
- self.add_notice(imported_notices)
266
- log_message("Successfully imported notices from json.")
267
- except Exception as e:
268
- log_error(f"Failed to import notices from json: {type(e).__name__} - {str(e)}", exc_info=True)
269
-
270
- def _update_counts(self, notice, remove=False):
271
- if remove:
272
- if notice["severity_code"] >= self.ERROR_CODE_START_VALUE:
273
- self.error_count -= 1
274
- self.severity_counts[notice["severity_name"]] -= 1
275
- else:
276
- if notice["severity_code"] >= self.ERROR_CODE_START_VALUE:
277
- self.error_count += 1
278
- self.severity_counts[notice["severity_name"]] += 1
279
-
280
-
281
- class SuccessLog:
282
- def __init__(self, subject:str, description:str=None, context:str=None):
283
- self.context = context
284
- self.subject = subject
285
- self.timestamp = datetime.now(timezone.utc).isoformat()
286
- self.description = description
287
-
288
- def to_dict(self):
289
- return {
290
- "context": self.context or "",
291
- "subject": self.subject,
292
- "timestamp": self.timestamp,
293
- "description": self.description or ""
294
- }
295
-
296
-
297
- class SuccessLogManager:
298
- def __init__(self):
299
- self.successlogs = []
300
- self.context_stack = []
301
-
302
- @contextmanager
303
- def successlog_context(self, context):
304
- self.push_context(context)
305
- try:
306
- yield
307
- finally:
308
- self.pop_context()
309
-
310
- def push_context(self, context):
311
- self.context_stack.append(context)
312
-
313
- def pop_context(self):
314
- if self.context_stack:
315
- self.context_stack.pop()
316
-
317
- def get_current_context(self):
318
- return " >> ".join(self.context_stack)
319
-
320
- def get_all_successlogs(self):
321
- return self.successlogs
322
-
323
- def add_successlog(self, successlog: SuccessLog):
324
- successlog.context = self.get_current_context()
325
- successlog_dict = successlog.to_dict()
326
- self.successlogs.append(successlog_dict)
327
-
328
- def add_successlogs(self, successlogs: List[SuccessLog]):
329
- for successlog in successlogs:
330
- successlog.context = self.get_current_context()
331
- successlog_dict = successlog.to_dict()
332
- self.successlogs.append(successlog_dict)
333
-
334
- def remove_successlog(self, successlog: SuccessLog):
335
- successlog_dict = successlog.to_dict()
336
- if successlog_dict in self.successlogs:
337
- self.successlogs.remove(successlog_dict)
338
-
339
- def clear_successlogs(self):
340
- self.successlogs = []
341
-
342
- def count_all_successlogs(self):
343
- return len(self.successlogs)
344
-
345
- def count_successlogs_for_current_context(self):
346
- current_context = self.get_current_context()
347
- return sum(
348
- 1 for successlog in self.successlogs
349
- if successlog["context"] == current_context
350
- )
351
-
352
- def count_successlogs_for_current_and_nested_contexts(self):
353
- current_context = self.get_current_context()
354
- return sum(
355
- 1 for successlog in self.successlogs
356
- if current_context in successlog["context"]
357
- )
358
-
359
-
360
- def export_successlogs_to_gcs_file(self, bucket_name, storage_client, file_name=None, top_level_context=None, save_locally=False, local_path=None, logger=None, max_retries=3):
361
- def log_message(message):
362
- if logger:
363
- logger.info(message)
364
-
365
- def log_error(message, exc_info=False):
366
- if logger:
367
- logger.error(message, exc_info=exc_info)
368
-
369
- if not file_name:
370
- timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
371
- if top_level_context:
372
- file_name = f"successlogs_{timestamp}_{top_level_context}_len{len(self.successlogs)}.json"
373
- else:
374
- file_name = f"successlogs_{timestamp}_len{len(self.successlogs)}.json"
375
-
376
- cloud_path=None
377
- local_path=None
378
- try:
379
- cloud_path, local_path = write_data_to_gcs(
380
- bucket_name=bucket_name,
381
- storage_client=storage_client,
382
- data=self.successlogs,
383
- file_name=file_name,
384
- save_locally=save_locally,
385
- local_path=local_path,
386
- logger=logger,
387
- max_retries=max_retries
388
- )
389
- log_message(f"Success logs successfully saved to GCS at {cloud_path} and locally at {local_path}.")
390
- except Exception as e:
391
- log_error(f"Failed to export success logs: {type(e).__name__} - {str(e)}", exc_info=True)
392
-
393
- return cloud_path, local_path
394
-
395
- def import_successlogs_from_json(self, json_or_file, logger=None):
396
- def log_message(message):
397
- if logger:
398
- logger.info(message)
399
- else:
400
- print(message)
401
-
402
- def log_error(message, exc_info=False):
403
- if logger:
404
- logger.error(message, exc_info=exc_info)
405
- else:
406
- print(message)
407
- try:
408
- if isinstance(json_or_file, str): # Load from string
409
- imported_success_logs = json.loads(json_or_file)
410
- elif hasattr(json_or_file, 'read'): # Load from file-like object
411
- imported_success_logs = json.load(json_or_file)
412
- self.add_successlog(imported_success_logs)
413
- log_message("Successfully imported success logs from json.")
414
- except Exception as e:
415
- log_error(f"Failed to import success logs from json: {type(e).__name__} - {str(e)}", exc_info=True)