ipulse-shared-core-ftredge 2.56-py3-none-any.whl → 3.1.1-py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

Potentially problematic release: this version of ipulse-shared-core-ftredge might be problematic.

Files changed (25)
  1. ipulse_shared_core_ftredge/__init__.py +10 -14
  2. ipulse_shared_core_ftredge/models/__init__.py +0 -1
  3. ipulse_shared_core_ftredge/models/organisation.py +61 -55
  4. ipulse_shared_core_ftredge/models/resource_catalog_item.py +97 -171
  5. ipulse_shared_core_ftredge/models/user_profile.py +3 -3
  6. ipulse_shared_core_ftredge/utils/__init__.py +3 -0
  7. ipulse_shared_core_ftredge/utils/utils_common.py +10 -0
  8. {ipulse_shared_core_ftredge-2.56.dist-info → ipulse_shared_core_ftredge-3.1.1.dist-info}/METADATA +5 -7
  9. ipulse_shared_core_ftredge-3.1.1.dist-info/RECORD +15 -0
  10. {ipulse_shared_core_ftredge-2.56.dist-info → ipulse_shared_core_ftredge-3.1.1.dist-info}/WHEEL +1 -1
  11. ipulse_shared_core_ftredge/enums/__init__.py +0 -28
  12. ipulse_shared_core_ftredge/enums/enums_common_utils.py +0 -171
  13. ipulse_shared_core_ftredge/enums/enums_data_eng.py +0 -44
  14. ipulse_shared_core_ftredge/enums/enums_module_fincore.py +0 -58
  15. ipulse_shared_core_ftredge/enums/enums_modules.py +0 -33
  16. ipulse_shared_core_ftredge/models/audit_log_firestore.py +0 -12
  17. ipulse_shared_core_ftredge/models/pulse_enums.py +0 -196
  18. ipulse_shared_core_ftredge/tests/__init__.py +0 -0
  19. ipulse_shared_core_ftredge/tests/test.py +0 -17
  20. ipulse_shared_core_ftredge/utils_common.py +0 -543
  21. ipulse_shared_core_ftredge/utils_gcp.py +0 -267
  22. ipulse_shared_core_ftredge/utils_templates_and_schemas.py +0 -155
  23. ipulse_shared_core_ftredge-2.56.dist-info/RECORD +0 -25
  24. {ipulse_shared_core_ftredge-2.56.dist-info → ipulse_shared_core_ftredge-3.1.1.dist-info}/LICENCE +0 -0
  25. {ipulse_shared_core_ftredge-2.56.dist-info → ipulse_shared_core_ftredge-3.1.1.dist-info}/top_level.txt +0 -0
ipulse_shared_core_ftredge/utils_common.py
@@ -1,543 +0,0 @@
- # pylint: disable=missing-module-docstring
- # pylint: disable=missing-function-docstring
- # pylint: disable=logging-fstring-interpolation
- # pylint: disable=line-too-long
- # pylint: disable=missing-class-docstring
- # pylint: disable=broad-exception-caught
- import traceback
- import json
- import uuid
- from datetime import datetime, timezone
- from contextlib import contextmanager
- from typing import List
- from google.cloud import logging as cloudlogging
- from ipulse_shared_core_ftredge.enums.enums_common_utils import TargetLogs, LogLevel, LogStatus
- from ipulse_shared_core_ftredge.utils_gcp import write_json_to_gcs
-
-
- # ["data_import","data_quality", "data_processing","data_general","data_persistance","metadata_quality", "metadata_processing", "metadata_persistance","metadata_general"]
-
- class ContextLog:
-     MAX_FIELD_LINES = 26  # Define the maximum number of traceback lines to include
-     MAX_FIELD_LENGTH = 10000
-
-     def __init__(self, level: LogLevel, base_context: str = None, collector_id: str = None,
-                  context: str = None, description: str = None,
-                  e: Exception = None, e_type: str = None, e_message: str = None, e_traceback: str = None,
-                  log_status: LogStatus = LogStatus.OPEN, subject: str = None, systems_impacted: List[str] = None):
-         if e is not None:
-             e_type = type(e).__name__ if e_type is None else e_type
-             e_message = str(e) if e_message is None else e_message
-             e_traceback = traceback.format_exc() if e_traceback is None else e_traceback
-         elif e_traceback is None and (e_type or e_message):
-             e_traceback = traceback.format_exc()
-
-         self.level = level
-         self.subject = subject
-         self.description = description
-         self._base_context = base_context
-         self._context = context
-         self._systems_impacted = systems_impacted if systems_impacted else []
-         self.collector_id = collector_id
-         self.exception_type = e_type
-         self.exception_message = e_message
-         self.exception_traceback = e_traceback
-         self.log_status = log_status
-         self.timestamp = datetime.now(timezone.utc).isoformat()
-
-     @property
-     def base_context(self):
-         return self._base_context
-
-     @base_context.setter
-     def base_context(self, value):
-         self._base_context = value
-
-     @property
-     def context(self):
-         return self._context
-
-     @context.setter
-     def context(self, value):
-         self._context = value
-
-     @property
-     def systems_impacted(self):
-         return self._systems_impacted
-
-     @systems_impacted.setter
-     def systems_impacted(self, list_of_si: List[str]):
-         self._systems_impacted = list_of_si
-
-     def add_system_impacted(self, system_impacted: str):
-         if self._systems_impacted is None:
-             self._systems_impacted = []
-         self._systems_impacted.append(system_impacted)
-
-     def remove_system_impacted(self, system_impacted: str):
-         if self._systems_impacted is not None:
-             self._systems_impacted.remove(system_impacted)
-
-     def clear_systems_impacted(self):
-         self._systems_impacted = []
-
-     def _format_traceback(self, e_traceback, e_message):
-         if not e_traceback or e_traceback == 'None\n':
-             return None
-
-         traceback_lines = e_traceback.splitlines()
-
-         # Check if the traceback is within the limits
-         if len(traceback_lines) <= self.MAX_FIELD_LINES and len(e_traceback) <= self.MAX_FIELD_LENGTH:
-             return e_traceback
-
-         # Remove lines that are part of the exception message if they are present in the traceback
-         message_lines = e_message.splitlines() if e_message else []
-         if message_lines:
-             for message_line in message_lines:
-                 if message_line in traceback_lines:
-                     traceback_lines.remove(message_line)
-
-         # Filter out lines from third-party libraries (like site-packages)
-         filtered_lines = [line for line in traceback_lines if "site-packages" not in line]
-
-         # If filtering results in too few lines, revert to the original traceback
-         if len(filtered_lines) < 2:
-             filtered_lines = traceback_lines
-
-         # Combine standalone bracket lines with the previous line
-         combined_lines = []
-         for line in filtered_lines:
-             if line.strip() in {"(", ")", "{", "}", "[", "]"} and combined_lines:
-                 combined_lines[-1] += " " + line.strip()
-             else:
-                 combined_lines.append(line)
-
-         # Ensure the number of lines doesn't exceed MAX_FIELD_LINES
-         if len(combined_lines) > self.MAX_FIELD_LINES:
-             keep_lines_start = min(self.MAX_FIELD_LINES // 2, len(combined_lines))
-             keep_lines_end = min(self.MAX_FIELD_LINES // 2, len(combined_lines) - keep_lines_start)
-             combined_lines = (
-                 combined_lines[:keep_lines_start] +
-                 ['... (truncated) ...'] +
-                 combined_lines[-keep_lines_end:]
-             )
-
-         formatted_traceback = '\n'.join(combined_lines)
-
-         # Ensure the total length doesn't exceed MAX_FIELD_LENGTH
-         if len(formatted_traceback) > self.MAX_FIELD_LENGTH:
-             truncated_length = self.MAX_FIELD_LENGTH - len('... (truncated) ...')
-             half_truncated_length = truncated_length // 2
-             formatted_traceback = (
-                 formatted_traceback[:half_truncated_length] +
-                 '\n... (truncated) ...\n' +
-                 formatted_traceback[-half_truncated_length:]
-             )
-         return formatted_traceback
-
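For an over-long traceback the method keeps the head and tail and drops the middle. A minimal standalone sketch of that arithmetic (illustrative only, not part of the package), reusing the same MAX_FIELD_LINES value:

    lines = [f"frame {i}" for i in range(100)]
    MAX_FIELD_LINES = 26
    keep_start = min(MAX_FIELD_LINES // 2, len(lines))             # 13
    keep_end = min(MAX_FIELD_LINES // 2, len(lines) - keep_start)  # 13
    trimmed = lines[:keep_start] + ['... (truncated) ...'] + lines[-keep_end:]
    assert len(trimmed) == 27  # 13 head + 1 marker + 13 tail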
-     def to_dict(self, max_field_len: int = 10000, size_limit: float = 256 * 1024 * 0.80):
-         size_limit = int(size_limit)  # Ensure size_limit is an integer
-
-         # Unified list of all fields
-         systems_impacted_str = f"{len(self.systems_impacted)} system(s): " + " ,,, ".join(self.systems_impacted) if self.systems_impacted else None
-         fields = [
-             ("log_status", str(self.log_status.name)),
-             ("level_code", self.level.value),
-             ("level_name", str(self.level.name)),
-             ("base_context", str(self.base_context)),
-             ("timestamp", str(self.timestamp)),
-             ("collector_id", str(self.collector_id)),
-             ("systems_impacted", systems_impacted_str),
-             ("context", str(self.context)),  # special sizing rules apply to it
-             ("subject", str(self.subject)),
-             ("description", str(self.description)),
-             ("exception_type", str(self.exception_type)),
-             ("exception_message", str(self.exception_message)),
-             ("exception_traceback", str(self._format_traceback(self.exception_traceback, self.exception_message)))
-         ]
-
-         # Calculate the byte size of a JSON-encoded field
-         def field_size(key, value):
-             return len(json.dumps({key: value}).encode('utf-8'))
-
-         # Truncate a value based on its type
-         def truncate_value(value, max_size):
-             if isinstance(value, str):
-                 half_size = max_size // 2
-                 return value[:half_size] + '...' + value[-(max_size - half_size - 3):]
-             return value
-
-         # Ensure no field exceeds max_field_len
-         for i, (key, value) in enumerate(fields):
-             if isinstance(value, str) and len(value) > max_field_len:
-                 fields[i] = (key, truncate_value(value, max_field_len))
-
-         # Ensure the total size of the dict doesn't exceed size_limit
-         total_size = sum(field_size(key, value) for key, value in fields)
-         log_dict = {}
-         truncated = False
-
-         if total_size > size_limit:
-             truncated = True
-             remaining_size = size_limit
-             remaining_fields = len(fields)
-
-             for key, value in fields:
-                 if remaining_fields > 0:
-                     max_size_per_field = remaining_size // remaining_fields
-                 else:
-                     max_size_per_field = 0
-
-                 field_sz = field_size(key, value)
-                 if field_sz > max_size_per_field:
-                     value = truncate_value(value, max_size_per_field)
-                     field_sz = field_size(key, value)
-
-                 log_dict[key] = value
-                 remaining_size -= field_sz
-                 remaining_fields -= 1
-         else:
-             log_dict = dict(fields)
-
-         log_dict['trunc'] = truncated
-
-         return log_dict
-
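In short, a ContextLog is a size-bounded, JSON-serialisable record. A minimal usage sketch (illustrative; it assumes LogLevel.ERROR exists in enums_common_utils, which the code below implies via ERROR_START_CODE):

    try:
        1 / 0
    except ZeroDivisionError as exc:
        log = ContextLog(level=LogLevel.ERROR, subject="demo",
                         description="sketch only", e=exc)
        entry = log.to_dict()            # capped at ~80% of 256 KiB by default
        assert entry["trunc"] is False   # a small record is not truncated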
- class Pipelinemon:
-     ERROR_START_CODE = LogLevel.ERROR.value
-     WARNING_START_CODE = LogLevel.WARNING.value
-     NOTICE_START_CODE = LogLevel.NOTICE.value
-     SUCCESS_START_CODE = LogLevel.SUCCESS.value
-     INFO_START_CODE = LogLevel.INFO.value
-
-     def __init__(self, base_context: str, target_logs: TargetLogs = TargetLogs.MIXED, logger_name=None, max_log_field_size: int = 10000, max_log_dict_size: float = 256 * 1024 * 0.80):
-         self._id = str(uuid.uuid4())
-         self._logs = []
-         self._early_stop = False
-         self._errors_count = 0
-         self._warnings_count = 0
-         self._notices_count = 0
-         self._successes_count = 0
-         self._infos_count = 0
-         self._systems_impacted = []
-         self._level_counts = {level.name: 0 for level in LogLevel}
-         self._base_context = base_context
-         self._context_stack = []
-         self._target_logs = target_logs.value
-         self._logger = self._initialize_logger(logger_name)
-         self._max_log_field_size = max_log_field_size
-         self._max_log_dict_size = max_log_dict_size
-
-     def _initialize_logger(self, logger_name):
-         if logger_name:
-             logging_client = cloudlogging.Client()
-             return logging_client.logger(logger_name)
-         return None
-
-     @contextmanager
-     def context(self, context):
-         self.push_context(context)
-         try:
-             yield
-         finally:
-             self.pop_context()
-
-     def push_context(self, context):
-         self._context_stack.append(context)
-
-     def pop_context(self):
-         if self._context_stack:
-             self._context_stack.pop()
-
-     @property
-     def current_context(self):
-         return " >> ".join(self._context_stack)
-
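Because push/pop are wrapped in a contextmanager, contexts nest with plain `with` blocks and unwind even when exceptions propagate. A minimal sketch:

    mon = Pipelinemon(base_context="demo-pipeline")  # no logger_name: no Cloud Logging client is created
    with mon.context("extract"):
        with mon.context("fetch_prices"):
            assert mon.current_context == "extract >> fetch_prices"
    assert mon.current_context == ""                 # stack fully unwound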
-     @property
-     def base_context(self):
-         return self._base_context
-
-     @property
-     def id(self):
-         return self._id
-
-     @property
-     def systems_impacted(self):
-         return self._systems_impacted
-
-     @systems_impacted.setter
-     def systems_impacted(self, list_of_si: List[str]):
-         self._systems_impacted = list_of_si
-
-     def add_system_impacted(self, system_impacted: str):
-         if self._systems_impacted is None:
-             self._systems_impacted = []
-         self._systems_impacted.append(system_impacted)
-
-     def clear_systems_impacted(self):
-         self._systems_impacted = []
-
-     @property
-     def max_log_field_size(self):
-         return self._max_log_field_size
-
-     @max_log_field_size.setter
-     def max_log_field_size(self, value):
-         self._max_log_field_size = value
-
-     @property
-     def max_log_dict_size(self):
-         return self._max_log_dict_size
-
-     @max_log_dict_size.setter
-     def max_log_dict_size(self, value):
-         self._max_log_dict_size = value
-
-     @property
-     def early_stop(self):
-         return self._early_stop
-
-     def set_early_stop(self, max_errors_tolerance: int, create_error_log=True, pop_context=False):
-         self._early_stop = True
-         if create_error_log:
-             if pop_context:
-                 self.pop_context()
-             self.add_log(ContextLog(level=LogLevel.ERROR_PIPELINE_THRESHOLD_REACHED,
-                                     subject="EARLY_STOP",
-                                     description=f"Total MAX_ERRORS_TOLERANCE of {max_errors_tolerance} has been reached."))
-
-     def reset_early_stop(self):
-         self._early_stop = False
-
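The early-stop flag is advisory; callers are expected to poll it between work items. A sketch of the intended loop, continuing the example above (work_items, process, and MAX_ERRORS are hypothetical caller-side names):

    MAX_ERRORS = 5  # hypothetical per-run tolerance
    for item in work_items:
        if mon.early_stop:
            break
        try:
            process(item)
        except Exception as exc:
            mon.add_log(ContextLog(level=LogLevel.ERROR, e=exc))
            if mon.count_errors() >= MAX_ERRORS:
                mon.set_early_stop(MAX_ERRORS)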
-     def add_log(self, log: ContextLog):
-         if (self._target_logs == TargetLogs.SUCCESSES.value and log.level.value >= self.NOTICE_START_CODE) or \
-            (self._target_logs == TargetLogs.WARNINGS_AND_ERRORS.value and log.level.value < self.WARNING_START_CODE):
-             raise ValueError(f"Invalid log level {log.level.name} for Pipelinemon target logs setup: {self._target_logs}")
-         log.base_context = self.base_context
-         log.context = self.current_context
-         log.collector_id = self.id
-         log.systems_impacted = self.systems_impacted
-         log_dict = log.to_dict(max_field_len=self.max_log_field_size, size_limit=self.max_log_dict_size)
-         self._logs.append(log_dict)
-         self._update_counts(log_dict)
-
-         if self._logger:
-             # We specifically avoid the ERROR severity for structured Pipelinemon reports, so that ERROR alerts
-             # stay reserved for critical application services. A single ERROR log can still be emitted for the
-             # whole pipeline, at the end of the pipeline.
-             if log.level.value >= self.WARNING_START_CODE:
-                 self._logger.log_struct(log_dict, severity="WARNING")
-             elif log.level.value >= self.NOTICE_START_CODE:
-                 self._logger.log_struct(log_dict, severity="NOTICE")
-             else:
-                 self._logger.log_struct(log_dict, severity="INFO")
-
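The severity mapping can be restated as a pure function; this sketch only mirrors the branches in add_log and is not part of the package:

    def severity_for(level_value: int) -> str:
        # WARNING is the ceiling so that Cloud Logging ERROR alerts
        # stay reserved for critical application services.
        if level_value >= Pipelinemon.WARNING_START_CODE:
            return "WARNING"
        if level_value >= Pipelinemon.NOTICE_START_CODE:
            return "NOTICE"
        return "INFO"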
-     def add_logs(self, logs: List[ContextLog]):
-         for log in logs:
-             self.add_log(log)
-
-     def clear_logs_and_counts(self):
-         self._logs = []
-         self._errors_count = 0
-         self._warnings_count = 0
-         self._notices_count = 0
-         self._successes_count = 0
-         self._infos_count = 0
-         self._level_counts = {level.name: 0 for level in LogLevel}
-
-     def clear_logs(self):
-         self._logs = []
-
-     def get_all_logs(self):
-         return self._logs
-
-     def get_logs_for_level(self, level: LogLevel):
-         return [log for log in self._logs if log["level_code"] == level.value]
-
-     def get_logs_by_str_in_context(self, context_substring: str):
-         return [
-             log for log in self._logs
-             if context_substring in log["context"]
-         ]
-
-     def contains_errors(self):
-         return self._errors_count > 0
-
-     def count_errors(self):
-         return self._errors_count
-
-     def contains_warnings_or_errors(self):
-         return self._warnings_count > 0 or self._errors_count > 0
-
-     def count_warnings_and_errors(self):
-         return self._warnings_count + self._errors_count
-
-     def count_warnings(self):
-         return self._warnings_count
-
-     def count_notices(self):
-         return self._notices_count
-
-     def count_successes(self):
-         return self._successes_count
-
-     def count_successes_with_notice(self):
-         return self.count_logs_by_level(LogLevel.SUCCESS_WITH_NOTICES)
-
-     def count_successes_no_notice(self):
-         return self.count_logs_by_level(LogLevel.SUCCESS)
-
-     def count_infos(self):
-         return self._infos_count
-
-     def count_all_logs(self):
-         return len(self._logs)
-
-     def count_logs_by_level(self, level: LogLevel):
-         return self._level_counts.get(level.name, 0)
-
-     def _count_logs(self, context_substring: str, exact_match=False, level_code_min=None, level_code_max=None):
-         return sum(
-             1 for log in self._logs
-             if (log["context"] == context_substring if exact_match else context_substring in log["context"]) and
-             (level_code_min is None or log["level_code"] >= level_code_min) and
-             (level_code_max is None or log["level_code"] <= level_code_max)
-         )
-
-     def count_logs_for_current_context(self):
-         return self._count_logs(self.current_context, exact_match=True)
-
-     def count_logs_for_current_and_nested_contexts(self):
-         return self._count_logs(self.current_context)
-
-     def count_logs_by_level_for_current_context(self, level: LogLevel):
-         return self._count_logs(self.current_context, exact_match=True, level_code_min=level.value, level_code_max=level.value)
-
-     def count_logs_by_level_for_current_and_nested_contexts(self, level: LogLevel):
-         return self._count_logs(self.current_context, level_code_min=level.value, level_code_max=level.value)
-
-     def count_errors_for_current_context(self):
-         return self._count_logs(self.current_context, exact_match=True, level_code_min=self.ERROR_START_CODE)
-
-     def count_errors_for_current_and_nested_contexts(self):
-         return self._count_logs(self.current_context, level_code_min=self.ERROR_START_CODE)
-
-     def count_warnings_and_errors_for_current_context(self):
-         return self._count_logs(self.current_context, exact_match=True, level_code_min=self.WARNING_START_CODE)
-
-     def count_warnings_and_errors_for_current_and_nested_contexts(self):
-         return self._count_logs(self.current_context, level_code_min=self.WARNING_START_CODE)
-
-     def count_warnings_for_current_context(self):
-         return self._count_logs(self.current_context, exact_match=True, level_code_min=self.WARNING_START_CODE, level_code_max=self.ERROR_START_CODE - 1)
-
-     def count_warnings_for_current_and_nested_contexts(self):
-         return self._count_logs(self.current_context, level_code_min=self.WARNING_START_CODE, level_code_max=self.ERROR_START_CODE - 1)
-
-     def count_notices_for_current_context(self):
-         return self._count_logs(self.current_context, exact_match=True, level_code_min=self.NOTICE_START_CODE, level_code_max=self.WARNING_START_CODE - 1)
-
-     def count_notices_for_current_and_nested_contexts(self):
-         return self._count_logs(self.current_context, level_code_min=self.NOTICE_START_CODE, level_code_max=self.WARNING_START_CODE - 1)
-
-     def count_successes_for_current_context(self):
-         return self._count_logs(self.current_context, exact_match=True, level_code_min=self.SUCCESS_START_CODE, level_code_max=self.NOTICE_START_CODE - 1)
-
-     def count_successes_for_current_and_nested_contexts(self):
-         return self._count_logs(self.current_context, level_code_min=self.SUCCESS_START_CODE, level_code_max=self.NOTICE_START_CODE - 1)
-
-     def count_infos_for_current_context(self):
-         return self._count_logs(self.current_context, exact_match=True, level_code_min=self.INFO_START_CODE, level_code_max=self.SUCCESS_START_CODE - 1)
-
-     def count_infos_for_current_and_nested_contexts(self):
-         return self._count_logs(self.current_context, level_code_min=self.INFO_START_CODE, level_code_max=self.SUCCESS_START_CODE - 1)
-
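All of these helpers are thin filters over _count_logs, with each level band a half-open numeric range (for example, warnings are WARNING_START_CODE <= level_code < ERROR_START_CODE). A short sketch, continuing the example above:

    mon.clear_logs_and_counts()
    with mon.context("load"):
        mon.add_log(ContextLog(level=LogLevel.WARNING, subject="slow_response"))
        assert mon.count_warnings_for_current_context() == 1  # exact context "load"
    assert mon.count_warnings_and_errors() == 1
    assert not mon.contains_errors()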
-     def export_logs_to_gcs_file(self, bucket_name, storage_client, file_prefix=None, file_name=None, top_level_context=None, save_locally=False, overwrite_if_exists=False, increment_if_exists=True, local_path=None, logger=None, max_retries=2):
-         def log_message(message):
-             if logger:
-                 logger.info(message)
-
-         def log_error(message, exc_info=False):
-             if logger:
-                 logger.error(message, exc_info=exc_info)
-
-         if not file_prefix:
-             file_prefix = self._target_logs
-         if not file_name:
-             timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
-             if top_level_context:
-                 file_name = f"{file_prefix}_{timestamp}_{top_level_context}_len{len(self._logs)}.json"
-             else:
-                 file_name = f"{file_prefix}_{timestamp}_len{len(self._logs)}.json"
-
-         result = None
-         try:
-             result = write_json_to_gcs(
-                 bucket_name=bucket_name,
-                 storage_client=storage_client,
-                 data=self._logs,
-                 file_name=file_name,
-                 save_locally=save_locally,
-                 local_path=local_path,
-                 logger=logger,
-                 max_retries=max_retries,
-                 overwrite_if_exists=overwrite_if_exists,
-                 increment_if_exists=increment_if_exists
-             )
-             log_message(f"{file_prefix} successfully saved (overwritten={result.get('gcs_file_overwritten')}, incremented={result.get('gcs_file_saved_with_increment')}) to GCS at {result.get('gcs_path')} and locally at {result.get('local_path')}.")
-         except Exception as e:
-             log_error(f"Failed at export_logs_to_gcs_file for {file_prefix} for file {file_name} to bucket {bucket_name}: {type(e).__name__} - {str(e)}")
-
-         return result
-
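A sketch of a typical export call; the bucket name is hypothetical, and write_json_to_gcs comes from the package's own utils_gcp:

    from google.cloud import storage

    result = mon.export_logs_to_gcs_file(
        bucket_name="my-pipeline-logs",   # hypothetical bucket
        storage_client=storage.Client(),
        top_level_context="daily_run",
    )
    # Default file name: f"{file_prefix}_{utc_timestamp}_daily_run_len{N}.json",
    # where file_prefix falls back to the TargetLogs value.
    if result:
        print(result.get("gcs_path"))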
-     def import_logs_from_json(self, json_or_file, logger=None):
-         def log_message(message):
-             if logger:
-                 logger.info(message)
-
-         def log_warning(message, exc_info=False):
-             if logger:
-                 logger.warning(message, exc_info=exc_info)
-
-         try:
-             if isinstance(json_or_file, str):  # Load from a JSON string
-                 imported_logs = json.loads(json_or_file)
-             elif hasattr(json_or_file, 'read'):  # Load from a file-like object
-                 imported_logs = json.load(json_or_file)
-             else:
-                 raise TypeError(f"Expected a JSON string or file-like object, got {type(json_or_file).__name__}")
-             self.add_logs(imported_logs)
-             log_message("Successfully imported logs from json.")
-         except Exception as e:
-             log_warning(f"Failed to import logs from json: {type(e).__name__} - {str(e)}", exc_info=True)
-
-     def _update_counts(self, log, remove=False):
-         level_code = log["level_code"]
-         level_name = log["level_name"]
-
-         if remove:
-             if level_code >= self.ERROR_START_CODE:
-                 self._errors_count -= 1
-             elif self.WARNING_START_CODE <= level_code < self.ERROR_START_CODE:
-                 self._warnings_count -= 1
-             elif self.NOTICE_START_CODE <= level_code < self.WARNING_START_CODE:
-                 self._notices_count -= 1
-             elif self.SUCCESS_START_CODE <= level_code < self.NOTICE_START_CODE:
-                 self._successes_count -= 1
-             elif self.INFO_START_CODE <= level_code < self.SUCCESS_START_CODE:
-                 self._infos_count -= 1
-             self._level_counts[level_name] -= 1
-         else:
-             if level_code >= self.ERROR_START_CODE:
-                 self._errors_count += 1
-             elif self.WARNING_START_CODE <= level_code < self.ERROR_START_CODE:
-                 self._warnings_count += 1
-             elif self.NOTICE_START_CODE <= level_code < self.WARNING_START_CODE:
-                 self._notices_count += 1
-             elif self.SUCCESS_START_CODE <= level_code < self.NOTICE_START_CODE:
-                 self._successes_count += 1
-             elif self.INFO_START_CODE <= level_code < self.SUCCESS_START_CODE:
-                 self._infos_count += 1
-             self._level_counts[level_name] += 1
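Throughout, _update_counts keeps two views in sync: the coarse per-band counters behind count_errors() and friends, and the exact per-level tallies behind count_logs_by_level(). One invariant that should hold after any sequence of add_log calls, continuing the sketch above:

    assert sum(mon.count_logs_by_level(lvl) for lvl in LogLevel) == mon.count_all_logs()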