ipulse-shared-core-ftredge 2.57__py3-none-any.whl → 3.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ipulse-shared-core-ftredge might be problematic.
Files changed (24)
  1. ipulse_shared_core_ftredge/__init__.py +9 -18
  2. ipulse_shared_core_ftredge/models/__init__.py +0 -1
  3. ipulse_shared_core_ftredge/models/organisation.py +61 -55
  4. ipulse_shared_core_ftredge/models/resource_catalog_item.py +97 -171
  5. ipulse_shared_core_ftredge/utils/__init__.py +3 -0
  6. ipulse_shared_core_ftredge/utils/utils_common.py +10 -0
  7. {ipulse_shared_core_ftredge-2.57.dist-info → ipulse_shared_core_ftredge-3.1.1.dist-info}/METADATA +5 -7
  8. ipulse_shared_core_ftredge-3.1.1.dist-info/RECORD +15 -0
  9. {ipulse_shared_core_ftredge-2.57.dist-info → ipulse_shared_core_ftredge-3.1.1.dist-info}/WHEEL +1 -1
  10. ipulse_shared_core_ftredge/enums/__init__.py +0 -29
  11. ipulse_shared_core_ftredge/enums/enums_common_utils.py +0 -177
  12. ipulse_shared_core_ftredge/enums/enums_data_eng.py +0 -44
  13. ipulse_shared_core_ftredge/enums/enums_module_fincore.py +0 -58
  14. ipulse_shared_core_ftredge/enums/enums_modules.py +0 -33
  15. ipulse_shared_core_ftredge/models/audit_log_firestore.py +0 -12
  16. ipulse_shared_core_ftredge/models/pulse_enums.py +0 -196
  17. ipulse_shared_core_ftredge/utils_custom_logs.py +0 -201
  18. ipulse_shared_core_ftredge/utils_gcp.py +0 -314
  19. ipulse_shared_core_ftredge/utils_gcp_for_pipelines.py +0 -201
  20. ipulse_shared_core_ftredge/utils_pipelinemon.py +0 -362
  21. ipulse_shared_core_ftredge/utils_templates_and_schemas.py +0 -153
  22. ipulse_shared_core_ftredge-2.57.dist-info/RECORD +0 -25
  23. {ipulse_shared_core_ftredge-2.57.dist-info → ipulse_shared_core_ftredge-3.1.1.dist-info}/LICENCE +0 -0
  24. {ipulse_shared_core_ftredge-2.57.dist-info → ipulse_shared_core_ftredge-3.1.1.dist-info}/top_level.txt +0 -0
ipulse_shared_core_ftredge/utils_gcp_for_pipelines.py
@@ -1,201 +0,0 @@
- # pylint: disable=missing-module-docstring
- # pylint: disable=missing-function-docstring
- # pylint: disable=missing-class-docstring
- # pylint: disable=broad-exception-caught
- # pylint: disable=line-too-long
- # pylint: disable=unused-variable
- import json
- import os
- import time
- from ipulse_shared_core_ftredge.enums.enums_common_utils import LogLevel
- from .utils_custom_logs import ContextLog
- from ipulse_shared_core_ftredge.utils_pipelinemon import Pipelinemon
-
- ############################################################################
- ##################### SETTING UP LOGGER ##########################
-
- #### DEPRECATED: THIS APPROACH WAS GOOD, BUT ERRORS WERE NOT REPORTED TO ERROR REPORTING
- # logging.basicConfig(level=logging.INFO)
- # logging_client = google.cloud.logging.Client()
- # logging_client.setup_logging()
- ###################################
-
-
- ##### THIS APPROACH IS USED NOW ########
- ENV = os.getenv('ENV', 'LOCAL').strip("'")
-
-
- def write_json_to_gcs_in_pipeline( pipelinemon:Pipelinemon, storage_client, data, bucket_name, file_name,
-                                    file_exists_if_starts_with_prefix:str=None, overwrite_if_exists:bool=False, increment_if_exists:bool=False,
-                                    save_locally:bool=False, local_path=None, max_retries:int=2, max_deletable_files:int=1):
-     """Saves data to Google Cloud Storage and optionally locally.
-
-     This function attempts to upload data to GCS.
-     - If the upload fails after retries and `save_locally` is True or `local_path` is provided, it attempts to save the data locally.
-     - It handles file name conflicts based on these rules:
-         - If `overwrite_if_exists` is True:
-             - If `file_exists_if_starts_with_prefix` is provided, ANY existing file whose name starts with the prefix is deleted, and the new file is saved with the provided `file_name`.
-             - If `file_exists_if_starts_with_prefix` is None, and a file with the exact `file_name` exists, it's overwritten.
-         - If `increment_if_exists` is True:
-             - If `file_exists_if_starts_with_prefix` is provided, a new file with an incremented version is created ONLY if a file with the EXACT `file_name` exists.
-             - If `file_exists_if_starts_with_prefix` is None, a new file with an incremented version is created if a file with the exact `file_name` exists.
-
-     - If both overwrite_if_exists and increment_if_exists are provided as True, an exception will be raised.
-     """
-
-     with pipelinemon.context("write_json_to_gcs_in_pipeline"):
-         # GCS upload exception
-         gcs_upload_error = False
-         # Input validation
-         if overwrite_if_exists and increment_if_exists:
-             pipelinemon.add_log(ContextLog(LogLevel.ERROR_CUSTOM, subject="Param validation", description="Both 'overwrite_if_exists' and 'increment_if_exists' cannot be True simultaneously."))
-             gcs_upload_error=True
-         if not isinstance(data, (list, dict, str)):
-             pipelinemon.add_log(ContextLog(LogLevel.ERROR_CUSTOM,subject="Data validation", description="Unsupported data type. Data must be a list, dict, or str."))
-             gcs_upload_error=True
-         if max_deletable_files > 10:
-             pipelinemon.add_log(ContextLog(LogLevel.ERROR_CUSTOM,subject="max_deletable_files", description="max_deletable_files should be less than 10 for safety. For more use another method."))
-             gcs_upload_error=True
-
-         # Prepare data
-         if isinstance(data, (list, dict)):
-             data_str = json.dumps(data, indent=2)
-         else:
-             data_str = data
-
-         bucket = storage_client.bucket(bucket_name)
-         base_file_name, ext = os.path.splitext(file_name)
-         increment = 0
-         attempts = 0
-         success = False
-
-         # GCS-related metadata
-         gcs_path = None
-         gcs_file_overwritten = False
-         gcs_file_already_exists = False
-         gcs_file_saved_with_increment = False
-         gcs_file_exists_checked_on_name = file_name
-         gcs_deleted_files=[]
-
-         # Local file path
-         local_path_final = None
-
-         try:
-             upload_allowed = True
-             # --- Overwrite Logic ---
-             if overwrite_if_exists:
-                 with pipelinemon.context("overwriting"):
-                     if file_exists_if_starts_with_prefix:
-                         gcs_file_exists_checked_on_name = file_exists_if_starts_with_prefix
-                         blobs_to_delete = list(bucket.list_blobs(prefix=file_exists_if_starts_with_prefix))
-                         if len(blobs_to_delete) > max_deletable_files:
-                             pipelinemon.add_log(ContextLog(LogLevel.NOTICE_ALREADY_EXISTS, subject=file_exists_if_starts_with_prefix, description=f"Prefix matched with {len(blobs_to_delete)} files in bucket {bucket_name}"))
-                             #### Ensuring to quit the operation if too many files are found
-                             raise Exception(f"Error: Attempt to delete {len(blobs_to_delete)} matched files, but limit is {max_deletable_files}.")
-                         if blobs_to_delete:
-                             gcs_file_already_exists = True
-                             pipelinemon.add_log(ContextLog(LogLevel.NOTICE_ALREADY_EXISTS, subject=file_exists_if_starts_with_prefix, description=f"Prefix matched with {len(blobs_to_delete)} files in bucket {bucket_name}"))
-                             for blob in blobs_to_delete:
-                                 gcs_path_del = f"gs://{bucket_name}/{blob.name}"
-                                 pipelinemon.add_system_impacted(f"delete: gcs_bucket_file: {gcs_path_del}")
-                                 blob.delete()
-                                 pipelinemon.add_log(ContextLog(LogLevel.INFO_REMOTE_DELETE_COMPLETE, subject= gcs_path_del, description="file deleted from GCS as part of overwrite, matched with prefix"))
-                                 gcs_deleted_files.append(blob.name)
-                             gcs_file_overwritten = True
-                     else:
-                         blob = bucket.blob(file_name)
-                         if blob.exists():
-                             gcs_file_already_exists = True
-                             pipelinemon.add_log(ContextLog(LogLevel.NOTICE_ALREADY_EXISTS, subject=file_name, description=f"Exact name matched with existing file in bucket {bucket_name}"))
-                             gcs_path_del = f"gs://{bucket_name}/{file_name}"
-                             pipelinemon.add_system_impacted(f"delete: gcs_bucket_file: {gcs_path_del}")
-                             blob.delete() # Delete the existing blob
-                             pipelinemon.add_log(ContextLog(LogLevel.INFO_REMOTE_DELETE_COMPLETE, subject= gcs_path_del, description="file deleted from GCS as part of overwrite, matched with exact name"))
-                             gcs_deleted_files.append(blob.name)
-                             gcs_file_overwritten = True
-
-             # --- Increment Logic ---
-             elif increment_if_exists:
-                 with pipelinemon.context("incrementing"):
-                     gcs_file_exists_checked_on_name = file_name # We only increment if the exact name exists
-                     while bucket.blob(file_name).exists():
-                         gcs_file_already_exists = True
-                         increment += 1
-                         file_name = f"{base_file_name}_v{increment}{ext}"
-                         gcs_file_saved_with_increment = True
-                     if increment>0:
-                         pipelinemon.add_log(ContextLog(LogLevel.NOTICE_ALREADY_EXISTS, subject=file_name, description=f"File saved with incremented version in {bucket_name}"))
-
-             # --- Check for Conflicts (Including Prefix) ---
-             else:
-                 if file_exists_if_starts_with_prefix:
-                     blobs_matched = list(bucket.list_blobs(prefix=file_exists_if_starts_with_prefix))
-                     if blobs_matched:
-                         upload_allowed = False
-                         pipelinemon.add_log(ContextLog(LogLevel.NOTICE_ALREADY_EXISTS, subject=file_exists_if_starts_with_prefix, description=f"Prefix matched with {len(blobs_matched)} existing files in bucket {bucket_name}."))
-                 elif bucket.blob(file_name).exists():
-                     pipelinemon.add_log(ContextLog(LogLevel.NOTICE_ALREADY_EXISTS, subject=file_name, description=f"Exact name matched with existing file in bucket {bucket_name}."))
-                     upload_allowed = False
-
-             # --- GCS Upload ---
-             if overwrite_if_exists or increment_if_exists or upload_allowed:
-                 with pipelinemon.context("uploading"):
-                     while attempts < max_retries and not success:
-                         try:
-                             gcs_path = f"gs://{bucket_name}/{file_name}"
-                             blob = bucket.blob(file_name) # Use the potentially updated file_name
-                             pipelinemon.add_system_impacted(f"upload: gcs_bucket_file: {gcs_path}")
-                             blob.upload_from_string(data_str, content_type='application/json')
-                             pipelinemon.add_log(ContextLog(LogLevel.INFO_REMOTE_PERSISTNACE_COMPLETE, subject= gcs_path, description="file uploaded to GCS"))
-                             success = True
-                         except Exception as e:
-                             attempts += 1
-                             if attempts < max_retries:
-                                 time.sleep(2 ** attempts)
-                             else:
-                                 pipelinemon.add_log(ContextLog(LogLevel.ERROR_EXCEPTION, e=e))
-                                 gcs_upload_error = True
-
-         except Exception as e:
-             pipelinemon.add_log(ContextLog(LogLevel.ERROR_EXCEPTION, e=e))
-             gcs_upload_error = True
-
-         # --- Save Locally ---
-         ###### Not logging the local save operation in pipeline, as it is not a critical operation
-         write_out=False
-         if not success or gcs_upload_error or save_locally or local_path:
-             try:
-                 local_path=local_path if local_path else "/tmp"
-                 local_path_final = os.path.join(local_path, file_name)
-
-                 if os.path.exists(local_path_final):
-                     if increment_if_exists:
-                         increment = 0
-                         while os.path.exists(local_path_final):
-                             increment += 1
-                             local_path_final = os.path.join(local_path, f"{base_file_name}_v{increment}{ext}")
-                         write_out=True
-                     elif overwrite_if_exists:
-                         write_out=True
-                     else:
-                         write_out=False
-                 else:
-                     write_out=True
-
-                 if write_out:
-                     with open(local_path_final, 'w', encoding='utf-8') as f:
-                         f.write(data_str)
-
-             except Exception as local_e:
-                 pipelinemon.add_log(ContextLog(LogLevel.WARNING_FIX_RECOMMENDED, e=local_e, description="Failed to write to local file"))
-
-         # --- Return Metadata ---
-         return {
-             "gcs_path": gcs_path if success else None, # Only set gcs_path if upload succeeded
-             "local_path": local_path_final if write_out else None, # Only set local_path if saved locally
-             "gcs_file_already_exists": gcs_file_already_exists,
-             "gcs_file_exists_checked_on_name":gcs_file_exists_checked_on_name ,
-             "gcs_file_overwritten": gcs_file_overwritten,
-             "gcs_deleted_file_names": ",,,".join(gcs_deleted_files) if gcs_deleted_files else None,
-             "gcs_file_saved_with_increment": gcs_file_saved_with_increment
-         }
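
For context on what this release drops: `write_json_to_gcs_in_pipeline` is removed entirely in 3.1.1, so the minimal sketch below reflects how a 2.57 caller likely drove it. The bucket name, file name, and payload are illustrative assumptions, not taken from the package; only the function signature, the Pipelinemon constructor, and the returned keys come from the removed modules shown in this diff.

    # Hypothetical usage sketch against the removed 2.57 API (bucket, file and payload names are invented).
    from google.cloud import storage
    from ipulse_shared_core_ftredge.utils_pipelinemon import Pipelinemon
    from ipulse_shared_core_ftredge.utils_gcp_for_pipelines import write_json_to_gcs_in_pipeline

    pipelinemon = Pipelinemon(base_context="daily_prices_ingestion")  # no logger_name, so cloud logging stays off
    storage_client = storage.Client()  # assumes default GCP credentials are configured

    result = write_json_to_gcs_in_pipeline(
        pipelinemon=pipelinemon,
        storage_client=storage_client,
        data={"symbol": "ABC", "close": 101.5},
        bucket_name="example-bucket",
        file_name="prices_2024-01-01.json",
        increment_if_exists=True,  # writes prices_2024-01-01_v1.json, _v2.json, ... if the name is taken
    )
    print(result["gcs_path"], result["gcs_file_saved_with_increment"])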
ipulse_shared_core_ftredge/utils_pipelinemon.py
@@ -1,362 +0,0 @@
- # pylint: disable=missing-module-docstring
- # pylint: disable=missing-function-docstring
- # pylint: disable=logging-fstring-interpolation
- # pylint: disable=line-too-long
- # pylint: disable=missing-class-docstring
- # pylint: disable=broad-exception-caught
- import json
- import uuid
- from datetime import datetime, timezone
- from contextlib import contextmanager
- from typing import List
- from google.cloud import logging as cloudlogging
- from ipulse_shared_core_ftredge.enums.enums_common_utils import TargetLogs, LogLevel
- from ipulse_shared_core_ftredge.utils_gcp import write_json_to_gcs
- from ipulse_shared_core_ftredge.utils_custom_logs import ContextLog
-
-
- class Pipelinemon:
-     ERROR_START_CODE = LogLevel.ERROR.value
-     WARNING_START_CODE = LogLevel.WARNING.value
-     NOTICE_START_CODE = LogLevel.NOTICE.value
-     SUCCESS_START_CODE = LogLevel.SUCCESS.value
-     INFO_START_CODE = LogLevel.INFO.value
-
-     def __init__(self, base_context: str, target_logs: TargetLogs = TargetLogs.MIXED, logger_name=None, max_log_field_size:int =10000, max_log_dict_size:float=256 * 1024 * 0.80, max_log_traceback_lines:int = 30):
-         self._id = str(uuid.uuid4())
-         self._logs = []
-         self._early_stop = False
-         self._errors_count = 0
-         self._warnings_count = 0
-         self._notices_count = 0
-         self._successes_count = 0
-         self._infos_count = 0
-         self._systems_impacted = []
-         self._by_level_counts = {level.name: 0 for level in LogLevel}
-         self._base_context = base_context
-         self._context_stack = []
-         self._target_logs = target_logs.value
-         self._logger = self._initialize_logger(logger_name)
-         self._max_log_field_size = max_log_field_size
-         self._max_log_dict_size = max_log_dict_size
-         self._max_log_traceback_lines = max_log_traceback_lines
-
-     def _initialize_logger(self, logger_name):
-         if logger_name:
-             logging_client = cloudlogging.Client()
-             return logging_client.logger(logger_name)
-         return None
-
-     @contextmanager
-     def context(self, context):
-         self.push_context(context)
-         try:
-             yield
-         finally:
-             self.pop_context()
-
-     def push_context(self, context):
-         self._context_stack.append(context)
-
-     def pop_context(self):
-         if self._context_stack:
-             self._context_stack.pop()
-
-     @property
-     def current_context(self):
-         return " >> ".join(self._context_stack)
-
-     @property
-     def base_context(self):
-         return self._base_context
-
-     @property
-     def id(self):
-         return self._id
-
-     @property
-     def systems_impacted(self):
-         return self._systems_impacted
-
-     @systems_impacted.setter
-     def systems_impacted(self, list_of_si: List[str]):
-         self._systems_impacted = list_of_si
-
-     def add_system_impacted(self, system_impacted: str):
-         if self._systems_impacted is None:
-             self._systems_impacted = []
-         self._systems_impacted.append(system_impacted)
-
-     def clear_systems_impacted(self):
-         self._systems_impacted = []
-
-     @property
-     def max_log_dict_size(self):
-         return self._max_log_dict_size
-
-     @max_log_dict_size.setter
-     def max_log_dict_size(self, value):
-         self._max_log_dict_size = value
-
-     @property
-     def max_log_field_size(self):
-         return self._max_log_field_size
-
-     @max_log_field_size.setter
-     def max_log_field_size(self, value):
-         self._max_log_field_size = value
-
-     @property
-     def max_log_traceback_lines(self):
-         return self._max_log_traceback_lines
-
-     @max_log_traceback_lines.setter
-     def max_log_traceback_lines(self, value):
-         self._max_log_traceback_lines = value
-
-     @property
-     def early_stop(self):
-         return self._early_stop
-
-     def set_early_stop(self, max_errors_tolerance: int, create_error_log=True, pop_context=False):
-         self._early_stop = True
-         if create_error_log:
-             if pop_context:
-                 self.pop_context()
-             self.add_log(ContextLog(level=LogLevel.ERROR_PIPELINE_THRESHOLD_REACHED,
-                                     subject="EARLY_STOP",
-                                     description=f"Total MAX_ERRORS_TOLERANCE of {max_errors_tolerance} has been reached."))
-
-     def reset_early_stop(self):
-         self._early_stop = False
-
-
-     def add_log(self, log: ContextLog ):
-         if (self._target_logs == TargetLogs.SUCCESSES and log.level >=self.NOTICE_START_CODE) or \
-            (self._target_logs == TargetLogs.WARNINGS_AND_ERRORS and log.level.value < self.WARNING_START_CODE):
-             raise ValueError(f"Invalid log level {log.level.name} for Pipelinemon target logs setup: {self._target_logs}")
-         log.base_context = self.base_context
-         log.context = self.current_context
-         log.collector_id = self.id
-         log.systems_impacted = self.systems_impacted
-         log_dict = log.to_dict(max_field_len=self.max_log_field_size, size_limit=self.max_log_dict_size, max_traceback_lines=self.max_log_traceback_lines)
-         self._logs.append(log_dict)
-         self._update_counts(log_dict)
-
-         if self._logger:
-             # We specifically want to avoid having an ERROR log level for this structured Pipelinemon reporting, to ensure Errors are alerting on Critical Application Services.
-             # A single ERROR log level can be used for the entire pipeline, which shall be used at the end of the pipeline
-             if log.level.value >= self.WARNING_START_CODE:
-                 self._logger.log_struct(log_dict, severity="WARNING")
-             elif log.level.value >= self.NOTICE_START_CODE:
-                 self._logger.log_struct(log_dict, severity="NOTICE")
-             else:
-                 self._logger.log_struct(log_dict, severity="INFO")
-
-     def add_logs(self, logs: List[ContextLog]):
-         for log in logs:
-             self.add_log(log)
-
-     def clear_logs_and_counts(self):
-         self._logs = []
-         self._errors_count = 0
-         self._warnings_count = 0
-         self._notices_count = 0
-         self._successes_count = 0
-         self._infos_count = 0
-         self._by_level_counts = {level.name: 0 for level in LogLevel}
-
-     def clear_logs(self):
-         self._logs = []
-
-     def get_all_logs(self):
-         return self._logs
-
-     def get_logs_for_level(self, level: LogLevel):
-         return [log for log in self._logs if log["level_code"] == level.value]
-
-     def get_logs_by_str_in_context(self, context_substring: str):
-         return [
-             log for log in self._logs
-             if context_substring in log["context"]
-         ]
-
-     def contains_errors(self):
-         return self._errors_count > 0
-
-     def count_errors(self):
-         return self._errors_count
-
-     def contains_warnings_or_errors(self):
-         return self._warnings_count > 0 or self._errors_count > 0
-
-     def count_warnings_and_errors(self):
-         return self._warnings_count + self._errors_count
-
-     def count_warnings(self):
-         return self._warnings_count
-
-     def count_notices(self):
-         return self._notices_count
-
-     def count_successes(self):
-         return self._successes_count
-
-     def count_successes_with_notice(self):
-         return self.count_logs_by_level(LogLevel.SUCCESS_WITH_NOTICES)
-
-     def count_successes_no_notice(self):
-         return self.count_logs_by_level(LogLevel.SUCCESS)
-
-     def count_infos(self):
-         return self._infos_count
-
-     def count_all_logs(self):
-         return len(self._logs)
-
-     def count_logs_by_level(self, level: LogLevel):
-         return self._by_level_counts.get(level.name, 0)
-
-     def _count_logs(self, context_substring: str, exact_match=False, level_code_min=None, level_code_max=None):
-         return sum(
-             1 for log in self._logs
-             if (log["context"] == context_substring if exact_match else context_substring in log["context"]) and
-             (level_code_min is None or log["level_code"] >= level_code_min) and
-             (level_code_max is None or log["level_code"] <= level_code_max)
-         )
-
-     def count_logs_for_current_context(self):
-         return self._count_logs(self.current_context, exact_match=True)
-
-     def count_logs_for_current_and_nested_contexts(self):
-         return self._count_logs(self.current_context)
-
-     def count_logs_by_level_for_current_context(self, level: LogLevel):
-         return self._count_logs(self.current_context, exact_match=True, level_code_min=level.value, level_code_max=level.value)
-
-     def count_logs_by_level_for_current_and_nested_contexts(self, level: LogLevel):
-         return self._count_logs(self.current_context, level_code_min=level.value, level_code_max=level.value)
-
-     def count_errors_for_current_context(self):
-         return self._count_logs(self.current_context, exact_match=True, level_code_min=self.ERROR_START_CODE)
-
-     def count_errors_for_current_and_nested_contexts(self):
-         return self._count_logs(self.current_context, level_code_min=self.ERROR_START_CODE)
-
-     def count_warnings_and_errors_for_current_context(self):
-         return self._count_logs(self.current_context, exact_match=True, level_code_min=self.WARNING_START_CODE)
-
-     def count_warnings_and_errors_for_current_and_nested_contexts(self):
-         return self._count_logs(self.current_context, level_code_min=self.WARNING_START_CODE)
-
-     def count_warnings_for_current_context(self):
-         return self._count_logs(self.current_context, exact_match=True, level_code_min=self.WARNING_START_CODE, level_code_max=self.ERROR_START_CODE - 1)
-
-     def count_warnings_for_current_and_nested_contexts(self):
-         return self._count_logs(self.current_context, level_code_min=self.WARNING_START_CODE, level_code_max=self.ERROR_START_CODE - 1)
-
-     def count_notices_for_current_context(self):
-         return self._count_logs(self.current_context, exact_match=True, level_code_min=self.NOTICE_START_CODE, level_code_max=self.WARNING_START_CODE-1)
-
-     def count_notices_for_current_and_nested_contexts(self):
-         return self._count_logs(self.current_context, level_code_min=self.NOTICE_START_CODE, level_code_max=self.WARNING_START_CODE-1)
-
-     def count_successes_for_current_context(self):
-         return self._count_logs(self.current_context, exact_match=True, level_code_min=self.SUCCESS_START_CODE, level_code_max=self.NOTICE_START_CODE-1)
-
-     def count_successes_for_current_and_nested_contexts(self):
-         return self._count_logs(self.current_context, level_code_min=self.SUCCESS_START_CODE, level_code_max=self.NOTICE_START_CODE-1)
-
-     def count_infos_for_current_context(self):
-         return self._count_logs(self.current_context, exact_match=True, level_code_min=self.INFO_START_CODE, level_code_max=self.SUCCESS_START_CODE-1)
-
-     def count_infos_for_current_and_nested_contexts(self):
-         return self._count_logs(self.current_context, level_code_min=self.INFO_START_CODE, level_code_max=self.SUCCESS_START_CODE-1)
-
-     def export_logs_to_gcs_file(self, bucket_name, storage_client, file_prefix=None, file_name=None, top_level_context=None, save_locally=False, overwrite_if_exists=False, increment_if_exists=True, local_path=None, logger=None, max_retries=2):
-         def log_message(message):
-             if logger:
-                 logger.info(message)
-
-         def log_error(message, exc_info=False):
-             if logger:
-                 logger.error(message, exc_info=exc_info)
-
-         if not file_prefix:
-             file_prefix = self._target_logs
-         if not file_name:
-             timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
-             if top_level_context:
-                 file_name = f"{file_prefix}_{timestamp}_{top_level_context}_len{len(self._logs)}.json"
-             else:
-                 file_name = f"{file_prefix}_{timestamp}_len{len(self._logs)}.json"
-
-         result = None
-         try:
-             result = write_json_to_gcs(
-                 bucket_name=bucket_name,
-                 storage_client=storage_client,
-                 data=self._logs,
-                 file_name=file_name,
-                 save_locally=save_locally,
-                 local_path=local_path,
-                 logger=logger,
-                 max_retries=max_retries,
-                 overwrite_if_exists=overwrite_if_exists,
-                 increment_if_exists=increment_if_exists
-
-             )
-             log_message(f"{file_prefix} successfully saved (overwritten={result.get('gcs_file_overwritten')}, incremented={result.get('gcs_file_saved_with_increment')}) to GCS at {result.get('gcs_path')} and locally at {result.get('local_path')}.")
-         except Exception as e:
-             log_error(f"Failed at export_logs_to_gcs_file for {file_prefix} for file {file_name} to bucket {bucket_name}: {type(e).__name__} - {str(e)}")
-
-         return result
-
-     def import_logs_from_json(self, json_or_file, logger=None):
-         def log_message(message):
-             if logger:
-                 logger.info(message)
-
-         def log_warning(message, exc_info=False):
-             if logger:
-                 logger.warning(message, exc_info=exc_info)
-
-         try:
-             if isinstance(json_or_file, str): # Load from string
-                 imported_logs = json.loads(json_or_file)
-             elif hasattr(json_or_file, 'read'): # Load from file-like object
-                 imported_logs = json.load(json_or_file)
-             self.add_logs(imported_logs)
-             log_message("Successfully imported logs from json.")
-         except Exception as e:
-             log_warning(f"Failed to import logs from json: {type(e).__name__} - {str(e)}", exc_info=True)
-
-     def _update_counts(self, log, remove=False):
-         level_code = log["level_code"]
-         level_name = log["level_name"]
-
-         if remove:
-             if level_code >= self.ERROR_START_CODE:
-                 self._errors_count -= 1
-             elif self.WARNING_START_CODE <= level_code < self.ERROR_START_CODE:
-                 self._warnings_count -= 1
-             elif self.NOTICE_START_CODE <= level_code < self.WARNING_START_CODE:
-                 self._notices_count -= 1
-             elif self.SUCCESS_START_CODE <= level_code < self.NOTICE_START_CODE:
-                 self._successes_count -= 1
-             elif self.INFO_START_CODE <= level_code < self.SUCCESS_START_CODE:
-                 self._infos_count -= 1
-             self._by_level_counts[level_name] -= 1
-         else:
-             if level_code >= self.ERROR_START_CODE:
-                 self._errors_count += 1
-             elif self.WARNING_START_CODE <= level_code < self.ERROR_START_CODE:
-                 self._warnings_count += 1
-             elif self.NOTICE_START_CODE <= level_code < self.WARNING_START_CODE:
-                 self._notices_count += 1
-             elif self.SUCCESS_START_CODE <= level_code < self.NOTICE_START_CODE:
-                 self._successes_count += 1
-             elif self.INFO_START_CODE <= level_code < self.SUCCESS_START_CODE:
-                 self._infos_count += 1
-             self._by_level_counts[level_name] += 1
-
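
Since the Pipelinemon class above also disappears in 3.1.1, here is a minimal sketch of how its context stack and counters were typically used under 2.57. The base context, subjects, and descriptions are made-up examples; the method names, default TargetLogs.MIXED, and the ContextLog call shape are taken from the removed code in this diff.

    # Hypothetical usage sketch of the removed 2.57 Pipelinemon API (context name and log contents are invented).
    from ipulse_shared_core_ftredge.enums.enums_common_utils import LogLevel
    from ipulse_shared_core_ftredge.utils_custom_logs import ContextLog
    from ipulse_shared_core_ftredge.utils_pipelinemon import Pipelinemon

    pipelinemon = Pipelinemon(base_context="nightly_etl")  # default TargetLogs.MIXED, no cloud logger

    with pipelinemon.context("extract"):
        with pipelinemon.context("source_api"):
            # Each log entry records the joined context, here "extract >> source_api".
            pipelinemon.add_log(ContextLog(LogLevel.SUCCESS, subject="fetch", description="10 rows fetched"))

    print(pipelinemon.count_successes())   # expected: 1
    print(pipelinemon.contains_errors())   # expected: False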