ipulse-shared-core-ftredge 2.6__py3-none-any.whl → 2.6.1__py3-none-any.whl

This diff shows the changes between publicly available package versions released to one of the supported registries, as they appear in those registries. It is provided for informational purposes only.

Potentially problematic release.


Files changed (36)
  1. ipulse_shared_core_ftredge/__init__.py +21 -4
  2. ipulse_shared_core_ftredge/enums/__init__.py +32 -0
  3. ipulse_shared_core_ftredge/enums/enums_cloud.py +17 -0
  4. ipulse_shared_core_ftredge/enums/enums_common_utils.py +98 -0
  5. ipulse_shared_core_ftredge/enums/enums_data_eng.py +109 -0
  6. ipulse_shared_core_ftredge/enums/enums_logs.py +79 -0
  7. ipulse_shared_core_ftredge/enums/enums_module_fincore.py +58 -0
  8. ipulse_shared_core_ftredge/enums/enums_modules.py +25 -0
  9. ipulse_shared_core_ftredge/{models → enums}/pulse_enums.py +10 -46
  10. ipulse_shared_core_ftredge/models/__init__.py +0 -1
  11. ipulse_shared_core_ftredge/models/organisation.py +61 -55
  12. ipulse_shared_core_ftredge/models/resource_catalog_item.py +97 -171
  13. ipulse_shared_core_ftredge/models/user_profile.py +10 -9
  14. ipulse_shared_core_ftredge/models/user_profile_update.py +32 -14
  15. ipulse_shared_core_ftredge/models/user_status.py +21 -11
  16. ipulse_shared_core_ftredge/utils/__init__.py +19 -0
  17. ipulse_shared_core_ftredge/utils/logs/__init__.py +2 -0
  18. ipulse_shared_core_ftredge/{models → utils/logs}/audit_log_firestore.py +1 -1
  19. ipulse_shared_core_ftredge/utils/logs/context_log.py +211 -0
  20. ipulse_shared_core_ftredge/utils/logs/get_logger.py +76 -0
  21. ipulse_shared_core_ftredge/utils/utils_cloud.py +44 -0
  22. ipulse_shared_core_ftredge/utils/utils_cloud_gcp.py +311 -0
  23. ipulse_shared_core_ftredge/utils/utils_cloud_gcp_with_collectors.py +169 -0
  24. ipulse_shared_core_ftredge/utils/utils_cloud_with_collectors.py +26 -0
  25. ipulse_shared_core_ftredge/utils/utils_collector_pipelinemon.py +356 -0
  26. ipulse_shared_core_ftredge/utils/utils_common.py +145 -0
  27. ipulse_shared_core_ftredge/utils/utils_templates_and_schemas.py +151 -0
  28. ipulse_shared_core_ftredge-2.6.1.dist-info/METADATA +14 -0
  29. ipulse_shared_core_ftredge-2.6.1.dist-info/RECORD +33 -0
  30. {ipulse_shared_core_ftredge-2.6.dist-info → ipulse_shared_core_ftredge-2.6.1.dist-info}/WHEEL +1 -1
  31. ipulse_shared_core_ftredge/tests/__init__.py +0 -0
  32. ipulse_shared_core_ftredge/tests/test.py +0 -17
  33. ipulse_shared_core_ftredge-2.6.dist-info/METADATA +0 -11
  34. ipulse_shared_core_ftredge-2.6.dist-info/RECORD +0 -17
  35. {ipulse_shared_core_ftredge-2.6.dist-info → ipulse_shared_core_ftredge-2.6.1.dist-info}/LICENCE +0 -0
  36. {ipulse_shared_core_ftredge-2.6.dist-info → ipulse_shared_core_ftredge-2.6.1.dist-info}/top_level.txt +0 -0
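The bulk of this release is a reorganisation: enums move under ipulse_shared_core_ftredge/enums, logging helpers under utils/logs, and the new cloud/pipeline utilities under utils. Below is a short import sketch against the new 2.6.1 layout, using only module paths that appear in the file list and in the diff hunks further down (whether the package __init__ files also re-export these names is not shown here):

# Module paths as laid out in 2.6.1 (taken from the file list / imports in the diff below)
from ipulse_shared_core_ftredge.enums.enums_logs import LogLevel, TargetLogs
from ipulse_shared_core_ftredge.enums.enums_cloud import CloudProvider
from ipulse_shared_core_ftredge.utils.logs.context_log import ContextLog
from ipulse_shared_core_ftredge.utils.utils_collector_pipelinemon import Pipelinemon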
ipulse_shared_core_ftredge/utils/utils_cloud_gcp_with_collectors.py
@@ -0,0 +1,169 @@
1
+ # pylint: disable=missing-module-docstring
2
+ # pylint: disable=missing-function-docstring
3
+ # pylint: disable=missing-class-docstring
4
+ # pylint: disable=broad-exception-caught
5
+ # pylint: disable=line-too-long
6
+ # pylint: disable=unused-variable
7
+ # pylint: disable=broad-exception-raised
8
+
9
+ import json
10
+ import os
11
+ import time
12
+ from typing import Optional
13
+ from google.cloud.storage import Client as GCSClient
14
+ from ipulse_shared_core_ftredge.enums.enums_logs import LogLevel
15
+ from .utils_collector_pipelinemon import Pipelinemon
16
+ from .logs.context_log import ContextLog
17
+
18
+
19
+ ############################################################################
20
+ ##################### SETTING UP LOGGER ##########################
21
+
22
+
23
+
24
+
25
+ def write_json_to_gcs_with_pipelinemon_extended( pipelinemon:Pipelinemon, storage_client:GCSClient, data:dict | list | str, bucket_name: str, file_name: str,
26
+ file_exists_if_starts_with_prefix:Optional[str] =None, overwrite_if_exists:bool=False, increment_if_exists:bool=False,
27
+ max_retries:int=2, max_deletable_files:int=1):
28
+     """Saves data to Google Cloud Storage.
29
+
30
+     This function attempts to upload data to GCS, retrying on failure up to `max_retries` times.
31
+     - If the upload still fails after all retries, the error message is returned in the result dict.
32
+     - It handles file name conflicts based on these rules:
33
+     - If `overwrite_if_exists` is True:
34
+         - If `file_exists_if_starts_with_prefix` is provided, ANY existing file whose name starts with that prefix is deleted, and the new file is saved with the provided `file_name`.
35
+         - If `file_exists_if_starts_with_prefix` is None, and a file with the exact `file_name` exists, it's overwritten.
36
+     - If `increment_if_exists` is True:
37
+         - A new file with an incremented version suffix is created only if a file with the EXACT `file_name` exists.
38
+         - `file_exists_if_starts_with_prefix` is not used for the existence check in this mode.
39
+
40
+     - If both `overwrite_if_exists` and `increment_if_exists` are True, an error is returned instead of uploading.
41
+     """
42
+
43
+ cloud_storage_ref="GCP_GCS"
44
+
45
+ with pipelinemon.context(f"write_json_to_{cloud_storage_ref}_with_pipelinemon"):
46
+ cloud_storage_upload_error = None
47
+ # Input validation
48
+ if overwrite_if_exists and increment_if_exists:
49
+ err_msg="Both 'overwrite_if_exists' and 'increment_if_exists' cannot be True simultaneously."
50
+ pipelinemon.add_log(ContextLog(LogLevel.ERROR_CUSTOM, subject="Param validation", description=err_msg))
51
+ return {"cloud_storage_upload_error": err_msg}
52
+ if max_deletable_files > 10:
53
+             err_msg="max_deletable_files must not exceed 10 for safety. For larger deletions use another method."
54
+ pipelinemon.add_log(ContextLog(LogLevel.ERROR_CUSTOM,subject="max_deletable_files", description=err_msg))
55
+ return {"cloud_storage_upload_error": err_msg}
56
+
57
+ # Prepare data
58
+ if isinstance(data, (list, dict)):
59
+ data_str = json.dumps(data, indent=2)
60
+ else:
61
+ data_str = data
62
+
63
+ bucket = storage_client.bucket(bucket_name)
64
+ base_file_name, ext = os.path.splitext(file_name)
65
+ increment = 0
66
+ attempts = 0
67
+ success = False
68
+
69
+ # GCS-related metadata
70
+ cloud_storage_path = None
71
+ cloud_storage_file_overwritten = False
72
+ cloud_storage_file_already_exists = False
73
+ cloud_storage_file_saved_with_increment = False
74
+ cloud_storage_file_exists_checked_on_name = file_name
75
+ cloud_storage_deleted_files=[]
76
+
77
+ try:
78
+ upload_allowed = True
79
+ # --- Overwrite Logic ---
80
+ if overwrite_if_exists:
81
+ with pipelinemon.context("overwriting"):
82
+ if file_exists_if_starts_with_prefix:
83
+ cloud_storage_file_exists_checked_on_name = file_exists_if_starts_with_prefix
84
+ blobs_to_delete = list(bucket.list_blobs(prefix=file_exists_if_starts_with_prefix))
85
+ if len(blobs_to_delete) > max_deletable_files:
86
+ err_msg=f"Error: Attempt to delete {len(blobs_to_delete)} matched files, but limit is {max_deletable_files}."
87
+ pipelinemon.add_log(ContextLog(LogLevel.NOTICE_ALREADY_EXISTS, subject=file_exists_if_starts_with_prefix, description=f"Prefix matched with {len(blobs_to_delete)} files in bucket {bucket_name}"))
88
+ pipelinemon.add_log(ContextLog(LogLevel.ERROR_CUSTOM, subject="Too many files", description=err_msg))
89
+                             #### Abort the operation if too many files match; the error is returned to the caller
90
+ return {"cloud_storage_upload_error": err_msg}
91
+ if blobs_to_delete:
92
+ cloud_storage_file_already_exists = True
93
+ pipelinemon.add_log(ContextLog(LogLevel.NOTICE_ALREADY_EXISTS, subject=file_exists_if_starts_with_prefix, description=f"Prefix matched with {len(blobs_to_delete)} files in bucket {bucket_name}"))
94
+ for blob in blobs_to_delete:
95
+ cloud_storage_path_del = f"gs://{bucket_name}/{blob.name}"
96
+ pipelinemon.add_system_impacted(f"delete: {cloud_storage_ref}_bucket_file: {cloud_storage_path_del}")
97
+ blob.delete()
98
+ pipelinemon.add_log(ContextLog(LogLevel.INFO_REMOTE_DELETE_COMPLETE, subject= cloud_storage_path_del, description=f"file deleted from {cloud_storage_ref} as part of overwrite, matched with prefix"))
99
+ cloud_storage_deleted_files.append(cloud_storage_path_del)
100
+ cloud_storage_file_overwritten = True
101
+ elif bucket.blob(file_name).exists():
102
+ cloud_storage_file_already_exists = True
103
+ pipelinemon.add_log(ContextLog(LogLevel.NOTICE_ALREADY_EXISTS, subject=file_name, description=f"Exact name matched with existing file in bucket {bucket_name}"))
104
+ cloud_storage_path_del = f"gs://{bucket_name}/{file_name}"
105
+ pipelinemon.add_system_impacted(f"delete: {cloud_storage_ref}_bucket_file: {cloud_storage_path_del}")
106
+                         bucket.blob(file_name).delete() # Delete the existing blob
107
+ pipelinemon.add_log(ContextLog(LogLevel.INFO_REMOTE_DELETE_COMPLETE, subject= cloud_storage_path_del, description=f"file deleted from {cloud_storage_ref} as part of overwrite, matched with exact name"))
108
+ cloud_storage_deleted_files.append(cloud_storage_path_del)
109
+ cloud_storage_file_overwritten = True
110
+ # --- Increment Logic ---
111
+ elif increment_if_exists:
112
+ with pipelinemon.context("incrementing"):
113
+ cloud_storage_file_exists_checked_on_name = file_name # We only increment if the exact name exists
114
+ while bucket.blob(file_name).exists():
115
+ cloud_storage_file_already_exists = True
116
+ increment += 1
117
+ file_name = f"{base_file_name}_v{increment}{ext}"
118
+ cloud_storage_file_saved_with_increment = True
119
+ if increment>0:
120
+ cloud_storage_path = f"gs://{bucket_name}/{file_name}"
121
+ pipelinemon.add_log(ContextLog(LogLevel.NOTICE_ALREADY_EXISTS, subject=file_name, description=f"Attempting to save file with incremented version in {bucket_name}"))
122
+ # --- Check for Conflicts (Including Prefix) ---
123
+ else:
124
+ if file_exists_if_starts_with_prefix:
125
+ blobs_matched = list(bucket.list_blobs(prefix=file_exists_if_starts_with_prefix))
126
+ cloud_storage_file_exists_checked_on_name = file_exists_if_starts_with_prefix
127
+ if blobs_matched:
128
+ upload_allowed = False
129
+ cloud_storage_file_already_exists = True
130
+ pipelinemon.add_log(ContextLog(LogLevel.NOTICE_ALREADY_EXISTS, subject=file_exists_if_starts_with_prefix, description=f"Prefix matched with {len(blobs_matched)} existing files in bucket {bucket_name}."))
131
+ elif bucket.blob(file_name).exists():
132
+ pipelinemon.add_log(ContextLog(LogLevel.NOTICE_ALREADY_EXISTS, subject=file_name, description=f"Exact name matched with existing file in bucket {bucket_name}."))
133
+ upload_allowed = False
134
+ cloud_storage_file_already_exists = True
135
+
136
+ # --- GCS Upload ---
137
+ cloud_storage_path = f"gs://{bucket_name}/{file_name}"
138
+ if overwrite_if_exists or increment_if_exists or upload_allowed:
139
+ with pipelinemon.context("uploading"):
140
+ while attempts < max_retries and not success:
141
+ try:
142
+ blob = bucket.blob(file_name) # Use the potentially updated file_name
143
+ pipelinemon.add_system_impacted(f"upload: {cloud_storage_ref}_bucket_file: {cloud_storage_path}")
144
+ blob.upload_from_string(data_str, content_type='application/json')
145
+ pipelinemon.add_log(ContextLog(LogLevel.INFO_REMOTE_PERSISTNACE_COMPLETE, subject= cloud_storage_path, description=f"file uploaded to {cloud_storage_ref}"))
146
+ success = True
147
+ except Exception as e:
148
+ attempts += 1
149
+ if attempts < max_retries:
150
+ time.sleep(2 ** attempts)
151
+ else:
152
+ err_msg=f"Error uploading file to {cloud_storage_ref} bucket {bucket_name} with name {file_name} : {type(e).__name__}-{str(e)}"
153
+ pipelinemon.add_log(ContextLog(LogLevel.ERROR_EXCEPTION, e=e, description=err_msg))
154
+ return {"cloud_storage_upload_error": err_msg}
155
+
156
+ except Exception as e:
157
+ pipelinemon.add_log(ContextLog(LogLevel.ERROR_EXCEPTION, e=e))
158
+ return {"cloud_storage_upload_error": f"Exception in GCS upload {type(e).__name__}-{str(e)}"}
159
+ # --- Return Metadata ---
160
+ return {
161
+ "cloud_storage_path": cloud_storage_path if ((success or not upload_allowed) and not cloud_storage_upload_error ) else None,
162
+ "cloud_storage_file_already_exists": cloud_storage_file_already_exists,
163
+ "cloud_storage_file_exists_checked_on_name":cloud_storage_file_exists_checked_on_name ,
164
+ "cloud_storage_file_overwritten": cloud_storage_file_overwritten,
165
+ "cloud_storage_deleted_file_names": ",,,".join(cloud_storage_deleted_files) if cloud_storage_deleted_files else None,
166
+ "cloud_storage_file_saved_with_increment": cloud_storage_file_saved_with_increment,
167
+ "cloud_storage_upload_error": cloud_storage_upload_error
168
+ }
169
+
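For orientation, here is a minimal usage sketch of the helper added above. It assumes default application credentials for the google-cloud-storage client; the bucket name, object name, payload and pipeline context are hypothetical, and the Pipelinemon/logger setup mirrors the collector class introduced later in this release.

import logging
from google.cloud.storage import Client as GCSClient
from ipulse_shared_core_ftredge.utils.utils_collector_pipelinemon import Pipelinemon
from ipulse_shared_core_ftredge.utils.utils_cloud_gcp_with_collectors import write_json_to_gcs_with_pipelinemon_extended

logger = logging.getLogger("pipeline")
pipelinemon = Pipelinemon(base_context="daily_prices_import", logger=logger)   # hypothetical pipeline name
storage_client = GCSClient()

result = write_json_to_gcs_with_pipelinemon_extended(
    pipelinemon=pipelinemon,
    storage_client=storage_client,
    data={"symbol": "ABC", "close": 123.4},      # dict/list is serialised with json.dumps; str is written as-is
    bucket_name="my-example-bucket",             # hypothetical bucket
    file_name="prices/abc_2024-01-01.json",      # hypothetical object name
    overwrite_if_exists=True,                    # mutually exclusive with increment_if_exists
)

if result.get("cloud_storage_upload_error"):
    logger.error(result["cloud_storage_upload_error"])
else:
    logger.info("written to %s", result.get("cloud_storage_path"))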
ipulse_shared_core_ftredge/utils/utils_cloud_with_collectors.py
@@ -0,0 +1,26 @@
1
+ # pylint: disable=missing-module-docstring
2
+ # pylint: disable=missing-function-docstring
3
+ # pylint: disable=missing-class-docstring
4
+ # pylint: disable=broad-exception-caught
5
+ # pylint: disable=line-too-long
6
+ # pylint: disable=unused-variable
7
+ # pylint: disable=broad-exception-raised
8
+ from typing import Optional
9
+ from ipulse_shared_core_ftredge.enums.enums_cloud import CloudProvider
10
+ from .utils_collector_pipelinemon import Pipelinemon
11
+ from .utils_cloud_gcp_with_collectors import write_json_to_gcs_with_pipelinemon_extended
12
+
13
+
14
+ def write_json_to_cloud_storage_with_pipelinemon_extended(cloud_provider:CloudProvider, pipelinemon:Pipelinemon, storage_client, data:dict | list | str, bucket_name: str, file_name: str,
15
+ file_exists_if_starts_with_prefix:Optional[str] =None, overwrite_if_exists:bool=False, increment_if_exists:bool=False,
16
+ max_retries:int=2, max_deletable_files:int=1):
17
+
18
+
19
+ if cloud_provider == CloudProvider.GCP:
20
+ return write_json_to_gcs_with_pipelinemon_extended(pipelinemon=pipelinemon, storage_client=storage_client, data=data, bucket_name=bucket_name, file_name=file_name,
21
+ file_exists_if_starts_with_prefix=file_exists_if_starts_with_prefix,
22
+ overwrite_if_exists=overwrite_if_exists, increment_if_exists=increment_if_exists,
23
+ max_retries=max_retries,
24
+ max_deletable_files=max_deletable_files)
25
+
26
+ raise ValueError(f"Unsupported cloud provider: {cloud_provider}. Supported cloud providers: {CloudProvider.GCP.value}")
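And a sketch of the provider-agnostic wrapper above, reusing the pipelinemon and storage_client objects from the previous sketch; only CloudProvider.GCP is routed in this version, anything else raises ValueError. Bucket and object names are again hypothetical.

from ipulse_shared_core_ftredge.enums.enums_cloud import CloudProvider
from ipulse_shared_core_ftredge.utils.utils_cloud_with_collectors import write_json_to_cloud_storage_with_pipelinemon_extended

result = write_json_to_cloud_storage_with_pipelinemon_extended(
    cloud_provider=CloudProvider.GCP,            # only GCP is supported; other providers raise ValueError
    pipelinemon=pipelinemon,
    storage_client=storage_client,
    data=[{"symbol": "ABC"}],
    bucket_name="my-example-bucket",             # hypothetical
    file_name="prices/abc_2024-01-01.json",
    increment_if_exists=True,                    # writes ..._v1.json, _v2.json, ... if the exact name is taken
)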
ipulse_shared_core_ftredge/utils/utils_collector_pipelinemon.py
@@ -0,0 +1,356 @@
1
+ # pylint: disable=missing-module-docstring
2
+ # pylint: disable=missing-function-docstring
3
+ # pylint: disable=logging-fstring-interpolation
4
+ # pylint: disable=line-too-long
5
+ # pylint: disable=missing-class-docstring
6
+ # pylint: disable=broad-exception-caught
7
+ import json
8
+ import uuid
9
+ from datetime import datetime, timezone
10
+ from contextlib import contextmanager
11
+ from typing import List
12
+ from ipulse_shared_core_ftredge.enums.enums_logs import TargetLogs, LogLevel
13
+ from ipulse_shared_core_ftredge.utils.logs.context_log import ContextLog
14
+ ############################################################################
15
+ ##### PIPELINEMON Collector for Logs and Statuses of running pipelines #######
16
+ class Pipelinemon:
17
+ ERROR_START_CODE = LogLevel.ERROR.value
18
+ WARNING_START_CODE = LogLevel.WARNING.value
19
+ NOTICE_START_CODE = LogLevel.NOTICE.value
20
+ SUCCESS_START_CODE = LogLevel.SUCCESS.value
21
+ INFO_START_CODE = LogLevel.INFO.value
22
+
23
+ def __init__(self, base_context: str, logger, target_logs: TargetLogs = TargetLogs.MIXED,
24
+ max_log_field_size:int =10000, max_log_dict_size:float=256 * 1024 * 0.80, max_log_traceback_lines:int = 30):
25
+
26
+ self._id = str(uuid.uuid4())
27
+ self._logs = []
28
+ self._early_stop = False
29
+ self._errors_count = 0
30
+ self._warnings_count = 0
31
+ self._notices_count = 0
32
+ self._successes_count = 0
33
+ self._infos_count = 0
34
+ self._systems_impacted = []
35
+ self._by_level_counts = {level.name: 0 for level in LogLevel}
36
+ self._base_context = base_context
37
+ self._context_stack = []
38
+ self._target_logs = target_logs.value
39
+ self._logger = logger
40
+ self._max_log_field_size = max_log_field_size
41
+ self._max_log_dict_size = max_log_dict_size
42
+ self._max_log_traceback_lines = max_log_traceback_lines
43
+
44
+
45
+ @contextmanager
46
+ def context(self, context):
47
+ self.push_context(context)
48
+ try:
49
+ yield
50
+ finally:
51
+ self.pop_context()
52
+
53
+ def push_context(self, context):
54
+ self._context_stack.append(context)
55
+
56
+ def pop_context(self):
57
+ if self._context_stack:
58
+ self._context_stack.pop()
59
+
60
+ @property
61
+ def current_context(self):
62
+ return " >> ".join(self._context_stack)
63
+
64
+ @property
65
+ def base_context(self):
66
+ return self._base_context
67
+
68
+ @property
69
+ def id(self):
70
+ return self._id
71
+
72
+ @property
73
+ def systems_impacted(self):
74
+ return self._systems_impacted
75
+
76
+ @systems_impacted.setter
77
+ def systems_impacted(self, list_of_si: List[str]):
78
+ self._systems_impacted = list_of_si
79
+
80
+ def add_system_impacted(self, system_impacted: str):
81
+ if self._systems_impacted is None:
82
+ self._systems_impacted = []
83
+ self._systems_impacted.append(system_impacted)
84
+
85
+ def clear_systems_impacted(self):
86
+ self._systems_impacted = []
87
+
88
+ @property
89
+ def max_log_dict_size(self):
90
+ return self._max_log_dict_size
91
+
92
+ @max_log_dict_size.setter
93
+ def max_log_dict_size(self, value):
94
+ self._max_log_dict_size = value
95
+
96
+ @property
97
+ def max_log_field_size(self):
98
+ return self._max_log_field_size
99
+
100
+ @max_log_field_size.setter
101
+ def max_log_field_size(self, value):
102
+ self._max_log_field_size = value
103
+
104
+ @property
105
+ def max_log_traceback_lines(self):
106
+ return self._max_log_traceback_lines
107
+
108
+ @max_log_traceback_lines.setter
109
+ def max_log_traceback_lines(self, value):
110
+ self._max_log_traceback_lines = value
111
+
112
+ @property
113
+ def early_stop(self):
114
+ return self._early_stop
115
+
116
+ def set_early_stop(self, max_errors_tolerance:int=0, max_warnings_tolerance:int=0, create_error_log=True, pop_context=False):
117
+ self._early_stop = True
118
+ if create_error_log:
119
+ if pop_context:
120
+ self.pop_context()
121
+ if max_errors_tolerance > 0:
122
+ self.add_log(ContextLog(level=LogLevel.ERROR_PIPELINE_THRESHOLD_REACHED,
123
+ subject="EARLY_STOP",
124
+ description=f"Total MAX_ERRORS_TOLERANCE of {max_errors_tolerance} has been reached."))
125
+ elif max_warnings_tolerance > 0:
126
+ self.add_log(ContextLog(level=LogLevel.ERROR_PIPELINE_THRESHOLD_REACHED,
127
+ subject="EARLY_STOP",
128
+ description=f"Total MAX_WARNINGS_TOLERANCE of {max_warnings_tolerance} has been reached."))
129
+ else:
130
+ self.add_log(ContextLog(level=LogLevel.ERROR_PIPELINE_THRESHOLD_REACHED,
131
+ subject="EARLY_STOP",
132
+ description="Early stop has been triggered."))
133
+
134
+ def reset_early_stop(self):
135
+ self._early_stop = False
136
+
137
+
138
+ def add_log(self, log: ContextLog ):
139
+         if (self._target_logs == TargetLogs.SUCCESSES and log.level.value >= self.NOTICE_START_CODE) or \
140
+ (self._target_logs == TargetLogs.WARNINGS_AND_ERRORS and log.level.value < self.WARNING_START_CODE):
141
+ raise ValueError(f"Invalid log level {log.level.name} for Pipelinemon target logs setup: {self._target_logs}")
142
+ log.base_context = self.base_context
143
+ log.context = self.current_context if self.current_context else "root"
144
+ log.collector_id = self.id
145
+ log.systems_impacted = self.systems_impacted
146
+ log_dict = log.to_dict(max_field_len=self.max_log_field_size, size_limit=self.max_log_dict_size, max_traceback_lines=self.max_log_traceback_lines)
147
+ self._logs.append(log_dict)
148
+ self._update_counts(log_dict)
149
+
150
+ if self._logger:
151
+             # We deliberately avoid emitting ERROR-level records for this structured Pipelinemon reporting, so that ERROR alerts stay reserved for critical application services.
152
+             # A single ERROR log is usually emitted once at the end of the entire pipeline.
153
+ if log.level.value >= self.WARNING_START_CODE:
154
+ self._logger.warning(log_dict)
155
+ else:
156
+ self._logger.info(log_dict)
157
+
158
+ def add_logs(self, logs: List[ContextLog]):
159
+ for log in logs:
160
+ self.add_log(log)
161
+
162
+ def clear_logs_and_counts(self):
163
+ self._logs = []
164
+ self._errors_count = 0
165
+ self._warnings_count = 0
166
+ self._notices_count = 0
167
+ self._successes_count = 0
168
+ self._infos_count = 0
169
+ self._by_level_counts = {level.name: 0 for level in LogLevel}
170
+
171
+ def clear_logs(self):
172
+ self._logs = []
173
+
174
+ def get_all_logs(self,in_json_format=False):
175
+ if in_json_format:
176
+ return json.dumps(self._logs)
177
+ return self._logs
178
+
179
+ def get_logs_for_level(self, level: LogLevel):
180
+ return [log for log in self._logs if log["level_code"] == level.value]
181
+
182
+ def get_logs_by_str_in_context(self, context_substring: str):
183
+ return [
184
+ log for log in self._logs
185
+ if context_substring in log["context"]
186
+ ]
187
+
188
+ def contains_errors(self):
189
+ return self._errors_count > 0
190
+
191
+ def count_errors(self):
192
+ return self._errors_count
193
+
194
+ def contains_warnings_or_errors(self):
195
+ return self._warnings_count > 0 or self._errors_count > 0
196
+
197
+ def count_warnings_and_errors(self):
198
+ return self._warnings_count + self._errors_count
199
+
200
+ def count_warnings(self):
201
+ return self._warnings_count
202
+
203
+ def count_notices(self):
204
+ return self._notices_count
205
+
206
+ def count_successes(self):
207
+ return self._successes_count
208
+
209
+ def count_successes_with_notice(self):
210
+ return self.count_logs_by_level(LogLevel.SUCCESS_WITH_NOTICES)
211
+
212
+ def count_successes_no_notice(self):
213
+ return self.count_logs_by_level(LogLevel.SUCCESS)
214
+
215
+ def count_infos(self):
216
+ return self._infos_count
217
+
218
+ def count_all_logs(self):
219
+ return len(self._logs)
220
+
221
+ def count_logs_by_level(self, level: LogLevel):
222
+ return self._by_level_counts.get(level.name, 0)
223
+
224
+ def _count_logs(self, context_substring: str, exact_match=False, level_code_min=None, level_code_max=None):
225
+ return sum(
226
+ 1 for log in self._logs
227
+ if (log["context"] == context_substring if exact_match else context_substring in log["context"]) and
228
+ (level_code_min is None or log["level_code"] >= level_code_min) and
229
+ (level_code_max is None or log["level_code"] <= level_code_max)
230
+ )
231
+
232
+ def count_logs_for_current_context(self):
233
+ return self._count_logs(self.current_context, exact_match=True)
234
+
235
+ def count_logs_for_current_and_nested_contexts(self):
236
+ return self._count_logs(self.current_context)
237
+
238
+ def count_logs_by_level_for_current_context(self, level: LogLevel):
239
+ return self._count_logs(self.current_context, exact_match=True, level_code_min=level.value, level_code_max=level.value)
240
+
241
+ def count_logs_by_level_for_current_and_nested_contexts(self, level: LogLevel):
242
+ return self._count_logs(self.current_context, level_code_min=level.value, level_code_max=level.value)
243
+
244
+ def count_errors_for_current_context(self):
245
+ return self._count_logs(self.current_context, exact_match=True, level_code_min=self.ERROR_START_CODE)
246
+
247
+ def count_errors_for_current_and_nested_contexts(self):
248
+ return self._count_logs(self.current_context, level_code_min=self.ERROR_START_CODE)
249
+
250
+ def count_warnings_and_errors_for_current_context(self):
251
+ return self._count_logs(self.current_context, exact_match=True, level_code_min=self.WARNING_START_CODE)
252
+
253
+ def count_warnings_and_errors_for_current_and_nested_contexts(self):
254
+ return self._count_logs(self.current_context, level_code_min=self.WARNING_START_CODE)
255
+
256
+ def count_warnings_for_current_context(self):
257
+ return self._count_logs(self.current_context, exact_match=True, level_code_min=self.WARNING_START_CODE, level_code_max=self.ERROR_START_CODE - 1)
258
+
259
+ def count_warnings_for_current_and_nested_contexts(self):
260
+ return self._count_logs(self.current_context, level_code_min=self.WARNING_START_CODE, level_code_max=self.ERROR_START_CODE - 1)
261
+
262
+ def count_notices_for_current_context(self):
263
+ return self._count_logs(self.current_context, exact_match=True, level_code_min=self.NOTICE_START_CODE, level_code_max=self.WARNING_START_CODE-1)
264
+
265
+ def count_notices_for_current_and_nested_contexts(self):
266
+ return self._count_logs(self.current_context, level_code_min=self.NOTICE_START_CODE, level_code_max=self.WARNING_START_CODE-1)
267
+
268
+ def count_successes_for_current_context(self):
269
+ return self._count_logs(self.current_context, exact_match=True, level_code_min=self.SUCCESS_START_CODE, level_code_max=self.NOTICE_START_CODE-1)
270
+
271
+ def count_successes_for_current_and_nested_contexts(self):
272
+ return self._count_logs(self.current_context, level_code_min=self.SUCCESS_START_CODE, level_code_max=self.NOTICE_START_CODE-1)
273
+
274
+ def count_infos_for_current_context(self):
275
+ return self._count_logs(self.current_context, exact_match=True, level_code_min=self.INFO_START_CODE, level_code_max=self.SUCCESS_START_CODE-1)
276
+
277
+ def count_infos_for_current_and_nested_contexts(self):
278
+ return self._count_logs(self.current_context, level_code_min=self.INFO_START_CODE, level_code_max=self.SUCCESS_START_CODE-1)
279
+
280
+ def _update_counts(self, log, remove=False):
281
+ level_code = log["level_code"]
282
+ level_name = log["level_name"]
283
+
284
+ if remove:
285
+ if level_code >= self.ERROR_START_CODE:
286
+ self._errors_count -= 1
287
+ elif self.WARNING_START_CODE <= level_code < self.ERROR_START_CODE:
288
+ self._warnings_count -= 1
289
+ elif self.NOTICE_START_CODE <= level_code < self.WARNING_START_CODE:
290
+ self._notices_count -= 1
291
+ elif self.SUCCESS_START_CODE <= level_code < self.NOTICE_START_CODE:
292
+ self._successes_count -= 1
293
+ elif self.INFO_START_CODE <= level_code < self.SUCCESS_START_CODE:
294
+ self._infos_count -= 1
295
+ self._by_level_counts[level_name] -= 1
296
+ else:
297
+ if level_code >= self.ERROR_START_CODE:
298
+ self._errors_count += 1
299
+ elif self.WARNING_START_CODE <= level_code < self.ERROR_START_CODE:
300
+ self._warnings_count += 1
301
+ elif self.NOTICE_START_CODE <= level_code < self.WARNING_START_CODE:
302
+ self._notices_count += 1
303
+ elif self.SUCCESS_START_CODE <= level_code < self.NOTICE_START_CODE:
304
+ self._successes_count += 1
305
+ elif self.INFO_START_CODE <= level_code < self.SUCCESS_START_CODE:
306
+ self._infos_count += 1
307
+ self._by_level_counts[level_name] += 1
308
+
309
+ def generate_file_name(self, file_prefix=None, include_base_context=True):
310
+ if not file_prefix:
311
+ file_prefix = self._target_logs
312
+ timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
313
+ if include_base_context:
314
+ file_name = f"{file_prefix}_{timestamp}_{self.base_context}_len{len(self._logs)}.json"
315
+ else:
316
+ file_name = f"{file_prefix}_{timestamp}_len{len(self._logs)}.json"
317
+
318
+ return file_name
319
+
320
+ def import_logs_from_json(self, json_or_file, logger=None):
321
+ def log_message(message):
322
+ if logger:
323
+ logger.info(message)
324
+
325
+ def log_warning(message, exc_info=False):
326
+ if logger:
327
+ logger.warning(message, exc_info=exc_info)
328
+
329
+ try:
330
+ if isinstance(json_or_file, str): # Load from string
331
+ imported_logs = json.loads(json_or_file)
332
+ elif hasattr(json_or_file, 'read'): # Load from file-like object
333
+ imported_logs = json.load(json_or_file)
334
+ self.add_logs(imported_logs)
335
+ log_message("Successfully imported logs from json.")
336
+ except Exception as e:
337
+ log_warning(f"Failed to import logs from json: {type(e).__name__} - {str(e)}", exc_info=True)
338
+
339
+
340
+ def generate_final_log_message(self, subjectref: str, total_subjs: int) -> str:
341
+ return f"""
342
+ Pipeline for {self.base_context} with pipelinemon.id: {self.id},
343
+ SUMMARY: Early_Stop= {self.early_stop} ;
344
+ A)SUCCESSES: {self.count_successes()}/{total_subjs} {subjectref}(s) ; out of which SUCCESSES_WITH_NOTICES: {self.count_successes_with_notice()}/{total_subjs} {subjectref}(s) ;
345
+ B)NOTICES: {self.count_notices()} ;
346
+ C)WARNINGS: {self.count_warnings()} ;
347
+ D)ERRORS: {self.count_errors()} ;
348
+ E)INFOS: {self.count_infos()} ;
349
+ """
350
+
351
+ def log_final_message(self, subjectref: str, total_subjs: int, generallogger):
352
+ final_log_message = self.generate_final_log_message(subjectref=subjectref, total_subjs=total_subjs)
353
+ if self.count_warnings_and_errors() > 0:
354
+ generallogger.error(final_log_message)
355
+ else:
356
+ generallogger.info(final_log_message)
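Finally, a minimal sketch of how the Pipelinemon collector above is typically driven: nested contexts via context(), one ContextLog per processed item, an error-tolerance check with set_early_stop(), and the aggregated final message. The work items and the threshold value are hypothetical; the LogLevel members used here are the ones referenced in the code above.

import logging
from ipulse_shared_core_ftredge.enums.enums_logs import LogLevel
from ipulse_shared_core_ftredge.utils.logs.context_log import ContextLog
from ipulse_shared_core_ftredge.utils.utils_collector_pipelinemon import Pipelinemon

logger = logging.getLogger("pipeline")
pipelinemon = Pipelinemon(base_context="daily_prices_import", logger=logger)

symbols = ["ABC", "DEF", "GHI"]          # hypothetical work items
MAX_ERRORS_TOLERANCE = 2                 # hypothetical threshold

with pipelinemon.context("fetch_and_store"):
    for symbol in symbols:
        with pipelinemon.context(f"symbol:{symbol}"):
            try:
                # ... fetch, transform and persist data for this symbol ...
                pipelinemon.add_log(ContextLog(LogLevel.SUCCESS, subject=symbol, description="symbol processed"))
            except Exception as e:
                pipelinemon.add_log(ContextLog(LogLevel.ERROR_EXCEPTION, e=e))
                if pipelinemon.count_errors() >= MAX_ERRORS_TOLERANCE:
                    pipelinemon.set_early_stop(max_errors_tolerance=MAX_ERRORS_TOLERANCE)
                    break

# a single aggregated line at the end; logged as ERROR only if warnings/errors were collected
pipelinemon.log_final_message(subjectref="symbol", total_subjs=len(symbols), generallogger=logger)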