ipulse-shared-core-ftredge 18.0.1__py3-none-any.whl → 20.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ipulse-shared-core-ftredge might be problematic. Click here for more details.

Files changed (35)
  1. ipulse_shared_core_ftredge/__init__.py +1 -12
  2. ipulse_shared_core_ftredge/exceptions/__init__.py +47 -0
  3. ipulse_shared_core_ftredge/exceptions/user_exceptions.py +219 -0
  4. ipulse_shared_core_ftredge/models/__init__.py +0 -2
  5. ipulse_shared_core_ftredge/models/base_data_model.py +6 -6
  6. ipulse_shared_core_ftredge/models/user_auth.py +59 -4
  7. ipulse_shared_core_ftredge/models/user_profile.py +41 -7
  8. ipulse_shared_core_ftredge/models/user_status.py +44 -138
  9. ipulse_shared_core_ftredge/monitoring/__init__.py +7 -0
  10. ipulse_shared_core_ftredge/monitoring/microservmon.py +526 -0
  11. ipulse_shared_core_ftredge/monitoring/tracemon.py +320 -0
  12. ipulse_shared_core_ftredge/services/__init__.py +21 -14
  13. ipulse_shared_core_ftredge/services/base/__init__.py +12 -0
  14. ipulse_shared_core_ftredge/services/base/base_firestore_service.py +520 -0
  15. ipulse_shared_core_ftredge/services/cache_aware_firestore_service.py +44 -8
  16. ipulse_shared_core_ftredge/services/charging_service.py +1 -1
  17. ipulse_shared_core_ftredge/services/user/__init__.py +37 -0
  18. ipulse_shared_core_ftredge/services/user/iam_management_operations.py +326 -0
  19. ipulse_shared_core_ftredge/services/user/subscription_management_operations.py +384 -0
  20. ipulse_shared_core_ftredge/services/user/user_account_operations.py +479 -0
  21. ipulse_shared_core_ftredge/services/user/user_auth_operations.py +305 -0
  22. ipulse_shared_core_ftredge/services/user/user_core_service.py +651 -0
  23. ipulse_shared_core_ftredge/services/user/user_holistic_operations.py +436 -0
  24. {ipulse_shared_core_ftredge-18.0.1.dist-info → ipulse_shared_core_ftredge-20.0.1.dist-info}/METADATA +1 -1
  25. ipulse_shared_core_ftredge-20.0.1.dist-info/RECORD +42 -0
  26. ipulse_shared_core_ftredge/models/organization_profile.py +0 -96
  27. ipulse_shared_core_ftredge/models/user_profile_update.py +0 -39
  28. ipulse_shared_core_ftredge/services/base_firestore_service.py +0 -249
  29. ipulse_shared_core_ftredge/services/fastapiservicemon.py +0 -140
  30. ipulse_shared_core_ftredge/services/servicemon.py +0 -240
  31. ipulse_shared_core_ftredge-18.0.1.dist-info/RECORD +0 -33
  32. ipulse_shared_core_ftredge/{services/base_service_exceptions.py → exceptions/base_exceptions.py} +1 -1
  33. {ipulse_shared_core_ftredge-18.0.1.dist-info → ipulse_shared_core_ftredge-20.0.1.dist-info}/WHEEL +0 -0
  34. {ipulse_shared_core_ftredge-18.0.1.dist-info → ipulse_shared_core_ftredge-20.0.1.dist-info}/licenses/LICENCE +0 -0
  35. {ipulse_shared_core_ftredge-18.0.1.dist-info → ipulse_shared_core_ftredge-20.0.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,526 @@
1
+ """
2
+ Microservmon - Lightweight logging collector for microservice requests and functions and API endpoints
3
+ """
4
+ import uuid
5
+ import time
6
+ from datetime import datetime, timezone
7
+ from typing import Dict, Any, Optional
8
+ from contextlib import contextmanager
9
+ from collections import defaultdict
10
+ from ipulse_shared_base_ftredge import (LogLevel, AbstractResource,
11
+ ProgressStatus, Action,
12
+ Alert, StructLog)
13
+ from ipulse_shared_base_ftredge.status import StatusCounts, map_progress_status_to_log_level, eval_statuses
14
+
15
class Microservmon:
    """Lightweight per-instance monitor for microservice events.

    A slimmed-down counterpart of Pipelinemon aimed at request/event-level
    monitoring (HTTP requests, Pub/Sub triggers, etc.) inside execution
    environments such as Cloud Functions or Cloud Run.

    It provides:
      1. Structured logging with context tracking
      2. Performance metrics capture per trace
      3. Microservice health monitoring
      4. Integration with the FastAPI request/response cycle
      5. Memory-efficient trace lifecycle management
    """

    def __init__(self, logger,
                 base_context: str,
                 microservice_name: str,
                 max_log_field_len: Optional[int] = 8000,
                 max_log_dict_byte_size: Optional[float] = 256 * 1024 * 0.80,
                 exclude_none_from_logs: bool = True):
        """Initialize the monitor.

        Args:
            logger: Destination logger for structured log dicts.
            base_context: Context string attached to every log record.
            microservice_name: Name of the monitored microservice.
            max_log_field_len: Cap on the length of any string field in a log.
            max_log_dict_byte_size: Cap on the serialized log dict size, in bytes.
            exclude_none_from_logs: Drop None-valued fields from log output
                (default: True).
        """
        # Identity and lifetime of this monitor instance.
        self._microservice_name = microservice_name
        self._microservice_start_time = time.time()

        started_at = datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')
        short_uuid = str(uuid.uuid4())[:8]  # first 8 chars of a UUID4
        self._id = f"{started_at}_{short_uuid}"

        # Context handling (the active trace id, when present, sits at index 0).
        self._base_context = base_context
        self._context_stack = []

        # Logging configuration.
        self._logger = logger
        self._max_log_field_len = max_log_field_len
        self._max_log_dict_byte_size = max_log_dict_byte_size
        self._exclude_none_from_logs = exclude_none_from_logs

        # Per-trace bookkeeping; entries are removed when a trace ends.
        self._active_traces: Dict[str, Dict[str, Any]] = {}

        # Microservice-wide metrics, aggregated across all traces.
        self._microservice_metrics = {
            "total_traces": 0,
            "active_traces": 0,
            "completed_traces": 0,
            "failed_traces": 0,
            "by_event_count": defaultdict(int),
            "by_level_code_count": defaultdict(int),
            "status_counts": StatusCounts()  # richer status tracking
        }

        # Announce startup of this instance.
        self._log_microservice_start()
80
+
81
+ def _log_microservice_start(self):
82
+ """Log the microservice instance startup."""
83
+ startup_log = StructLog(
84
+ level=LogLevel.INFO,
85
+ resource=AbstractResource.MICROSERVMON,
86
+ action=Action.EXECUTE,
87
+ progress_status=ProgressStatus.STARTED,
88
+ description=f"Microservice {self.microservice_name} instance started",
89
+ collector_id=self.id,
90
+ base_context=self.base_context,
91
+ context="microservice_startup"
92
+ )
93
+ self._write_log_to_logger(startup_log)
94
+
95
+ @property
96
+ def id(self) -> str:
97
+ """Get the unique ID for this microservice instance."""
98
+ return self._id
99
+
100
+ @property
101
+ def base_context(self) -> str:
102
+ """Get the base context for this microservice execution."""
103
+ return self._base_context
104
+
105
+ @property
106
+ def microservice_name(self) -> str:
107
+ """Get the microservice name being monitored."""
108
+ return self._microservice_name
109
+
110
+ @property
111
+ def microservice_metrics(self) -> Dict[str, Any]:
112
+ """Get the current microservice-wide metrics."""
113
+ metrics = self._microservice_metrics.copy()
114
+ metrics["by_event_count"] = dict(metrics["by_event_count"])
115
+ metrics["by_level_code_count"] = dict(metrics["by_level_code_count"])
116
+ return metrics
117
+
118
+ @property
119
+ def active_trace_count(self) -> int:
120
+ """Get count of currently active traces."""
121
+ return len(self._active_traces)
122
+
123
+ @property
124
+ def current_context(self) -> str:
125
+ """Get the current context stack as a string."""
126
+ return " >> ".join(self._context_stack) if self._context_stack else "root"
127
+
128
+ @property
129
+ def current_trace_id(self) -> Optional[str]:
130
+ """Get the current trace_id from context stack (first element if present)."""
131
+ return self._context_stack[0] if self._context_stack else None
132
+
133
+ @contextmanager
134
+ def context(self, context_name: str):
135
+ """
136
+ Context manager for tracking execution context.
137
+ Note: This is for microservice-wide context, not trace-specific.
138
+
139
+ Args:
140
+ context_name: The name of the current execution context
141
+ """
142
+ self.push_context(context_name)
143
+ try:
144
+ yield
145
+ finally:
146
+ self.pop_context()
147
+
148
+ @contextmanager
149
+ def trace(self, description: Optional[str] = None):
150
+ """
151
+ Context manager for trace lifecycle management.
152
+ Automatically starts and ends a trace, managing the context stack.
153
+
154
+ Args:
155
+ description: Optional description of the trace
156
+
157
+ Yields:
158
+ str: The trace_id for the current trace
159
+ """
160
+ trace_id = self.start_trace(description)
161
+ try:
162
+ yield trace_id
163
+ finally:
164
+ self.end_trace(trace_id)
165
+
166
+ def push_context(self, context: str):
167
+ """Add a context level to the stack."""
168
+ self._context_stack.append(context)
169
+
170
+ def pop_context(self):
171
+ """Remove the most recent context from the stack."""
172
+ if self._context_stack:
173
+ return self._context_stack.pop()
174
+
175
+ def start_trace(self, description: Optional[str] = None) -> str:
176
+ """
177
+ Start monitoring a new trace (request/event) with auto-generated trace ID.
178
+
179
+ Args:
180
+ description: Optional description of the trace
181
+
182
+ Returns:
183
+ str: Auto-generated trace_id
184
+ """
185
+ # Auto-generate trace ID
186
+ timestamp = datetime.now(timezone.utc).strftime('%H%M%S_%f')[:-3] # Include milliseconds
187
+ trace_id = f"trace_{timestamp}_{str(uuid.uuid4())[:6]}"
188
+
189
+ start_time = time.time()
190
+
191
+ # Initialize trace metrics with StatusCounts
192
+ self._active_traces[trace_id] = {
193
+ "start_time": start_time,
194
+ "status": ProgressStatus.IN_PROGRESS.name,
195
+ "by_event_count": defaultdict(int),
196
+ "by_level_code_count": defaultdict(int),
197
+ "status_counts": StatusCounts() # Track status progression within trace
198
+ }
199
+
200
+ # Add initial status to trace
201
+ self._active_traces[trace_id]["status_counts"].add_status(ProgressStatus.IN_PROGRESS)
202
+
203
+ # Update microservice metrics
204
+ self._microservice_metrics["total_traces"] += 1
205
+ self._microservice_metrics["active_traces"] = len(self._active_traces)
206
+ self._microservice_metrics["status_counts"].add_status(ProgressStatus.IN_PROGRESS)
207
+
208
+ # Push trace_id as first element in context stack
209
+ self._context_stack.insert(0, trace_id)
210
+
211
+ # Log the trace start using StructLog
212
+ msg = description if description else f"Starting trace {trace_id}"
213
+ start_log = StructLog(
214
+ level=LogLevel.INFO,
215
+ description=msg,
216
+ resource=AbstractResource.MICROSERVICE_TRACE,
217
+ action=Action.EXECUTE,
218
+ progress_status=ProgressStatus.IN_PROGRESS
219
+ )
220
+ self.log(start_log, trace_id)
221
+
222
+ return trace_id
223
+
224
+ def end_trace(self, trace_id: str, force_status: Optional[ProgressStatus] = None) -> None:
225
+ """
226
+ End monitoring for a trace and record final metrics.
227
+ Status is automatically calculated based on trace metrics unless forced.
228
+
229
+ Args:
230
+ trace_id: The trace identifier to end
231
+ force_status: Optional status to force, overriding automatic calculation
232
+ """
233
+ if trace_id not in self._active_traces:
234
+ # Log warning about unknown trace using StructLog
235
+ warning_log = StructLog(
236
+ level=LogLevel.WARNING,
237
+ description=f"Attempted to end unknown trace: {trace_id}",
238
+ resource=AbstractResource.MICROSERVICE_TRACE,
239
+ action=Action.EXECUTE,
240
+ progress_status=ProgressStatus.UNFINISHED,
241
+ alert=Alert.NOT_FOUND
242
+ )
243
+ self.log(warning_log)
244
+ return
245
+
246
+ trace_metrics = self._active_traces[trace_id]
247
+
248
+ # Calculate duration
249
+ end_time = time.time()
250
+ duration_ms = int((end_time - trace_metrics["start_time"]) * 1000)
251
+ trace_metrics["duration_ms"] = duration_ms
252
+ trace_metrics["end_time"] = end_time
253
+
254
+ # Get readable error/warning counts using existing helper functions
255
+ error_count = self._get_error_count_for_trace(trace_id)
256
+ warning_count = self._get_warning_count_for_trace(trace_id)
257
+
258
+ # Use status helpers for intelligent status calculation
259
+ if force_status is not None:
260
+ final_status = force_status
261
+ level = map_progress_status_to_log_level(final_status)
262
+ else:
263
+ # Build status list based on log levels for evaluation
264
+ status_list = []
265
+ if error_count > 0:
266
+ status_list.append(ProgressStatus.FINISHED_WITH_ISSUES)
267
+ elif warning_count > 0:
268
+ status_list.append(ProgressStatus.DONE_WITH_WARNINGS)
269
+ else:
270
+ status_list.append(ProgressStatus.DONE)
271
+
272
+ # Use eval_statuses for consistent status evaluation
273
+ final_status = eval_statuses(
274
+ status_list,
275
+ fail_or_unfinish_if_any_pending=True, # Since this is end of trace
276
+ issues_allowed=True
277
+ )
278
+ level = map_progress_status_to_log_level(final_status)
279
+
280
+ # Update trace status
281
+ trace_metrics["status"] = final_status.name
282
+ trace_metrics["status_counts"].add_status(final_status)
283
+
284
+ # Prepare summary message
285
+ status_source = "FORCED" if force_status is not None else "AUTO"
286
+ summary_msg = (
287
+ f"Trace {trace_id} completed with status {final_status.name} ({status_source}). "
288
+ f"Duration: {duration_ms}ms. "
289
+ f"Errors: {error_count}, Warnings: {warning_count}"
290
+ )
291
+
292
+ # Log the completion using StructLog
293
+ completion_log = StructLog(
294
+ level=level,
295
+ description=summary_msg,
296
+ resource=AbstractResource.MICROSERVMON,
297
+ action=Action.EXECUTE,
298
+ progress_status=final_status
299
+ )
300
+ self.log(completion_log, trace_id)
301
+
302
+ # Update microservice-wide metrics using StatusCounts
303
+ self._microservice_metrics["completed_traces"] += 1
304
+ if final_status in ProgressStatus.failure_statuses():
305
+ self._microservice_metrics["failed_traces"] += 1
306
+
307
+ # Add final status to microservice status counts
308
+ self._microservice_metrics["status_counts"].add_status(final_status)
309
+
310
+ # Aggregate trace metrics to microservice level
311
+ for event, count in trace_metrics["by_event_count"].items():
312
+ self._microservice_metrics["by_event_count"][event] += count
313
+ for level_code, count in trace_metrics["by_level_code_count"].items():
314
+ self._microservice_metrics["by_level_code_count"][level_code] += count
315
+
316
+ # Clean up trace data from memory
317
+ del self._active_traces[trace_id]
318
+ self._microservice_metrics["active_traces"] = len(self._active_traces)
319
+
320
+ # Remove trace_id from context stack if it's the first element
321
+ if self._context_stack and self._context_stack[0] == trace_id:
322
+ self._context_stack.pop(0)
323
+
324
+ def log(self, log: StructLog, trace_id: Optional[str] = None) -> None:
325
+ """
326
+ Log a StructLog message with trace context.
327
+
328
+ Args:
329
+ log: StructLog instance to log
330
+ trace_id: Optional trace ID for trace-specific logging. If not provided,
331
+ will use current_trace_id from context stack
332
+ """
333
+ # Use provided trace_id or infer from context stack
334
+ effective_trace_id = trace_id or self.current_trace_id
335
+
336
+ # Calculate elapsed time
337
+ elapsed_ms = None
338
+ if effective_trace_id and effective_trace_id in self._active_traces:
339
+ start_time = self._active_traces[effective_trace_id]["start_time"]
340
+ elapsed_ms = int((time.time() - start_time) * 1000)
341
+ else:
342
+ # Use microservice start time if no trace
343
+ elapsed_ms = int((time.time() - self._microservice_start_time) * 1000)
344
+
345
+ # Set microservice-specific context on the log
346
+ log.collector_id = self.id
347
+ log.base_context = self.base_context
348
+
349
+ # Context with trace_id first, then other context elements
350
+ if effective_trace_id:
351
+ context_parts = [effective_trace_id]
352
+ # Add non-trace_id context elements (skip first element if it's trace_id)
353
+ other_contexts = [ctx for ctx in self._context_stack if ctx != effective_trace_id]
354
+ context_parts.extend(other_contexts)
355
+ log.context = " >> ".join(context_parts)
356
+ else:
357
+ log.context = self.current_context
358
+
359
+ log.trace_id = effective_trace_id
360
+
361
+ # Append elapsed time to existing notes
362
+ existing_note = log.note or ""
363
+ elapsed_note = f"elapsed_ms: {elapsed_ms}"
364
+ log.note = f"{existing_note}; {elapsed_note}" if existing_note else elapsed_note
365
+
366
+ # Update metrics for the trace or microservice
367
+ self._update_counts(log, effective_trace_id)
368
+
369
+ # Write to logger
370
+ self._write_log_to_logger(log)
371
+
372
+ def _update_counts(self, log: StructLog, trace_id: Optional[str] = None):
373
+ """Update counts for event tracking."""
374
+ event_tuple = log.getEvent()
375
+ level = log.level
376
+
377
+ # Update trace-specific metrics if trace_id provided
378
+ if trace_id and trace_id in self._active_traces:
379
+ trace_metrics = self._active_traces[trace_id]
380
+ trace_metrics["by_event_count"][event_tuple] += 1
381
+ trace_metrics["by_level_code_count"][level.value] += 1
382
+
383
+ def _get_error_count_for_trace(self, trace_id: str) -> int:
384
+ """Get total error count (ERROR + CRITICAL) for a specific trace."""
385
+ if trace_id not in self._active_traces:
386
+ return 0
387
+
388
+ trace_metrics = self._active_traces[trace_id]
389
+ return (trace_metrics["by_level_code_count"].get(LogLevel.ERROR.value, 0) +
390
+ trace_metrics["by_level_code_count"].get(LogLevel.CRITICAL.value, 0))
391
+
392
+ def _get_warning_count_for_trace(self, trace_id: str) -> int:
393
+ """Get warning count for a specific trace."""
394
+ if trace_id not in self._active_traces:
395
+ return 0
396
+
397
+ trace_metrics = self._active_traces[trace_id]
398
+ return trace_metrics["by_level_code_count"].get(LogLevel.WARNING.value, 0)
399
+
400
+ def get_readable_level_counts_for_trace(self, trace_id: str) -> Dict[str, int]:
401
+ """Get readable level counts for a specific trace."""
402
+ if trace_id not in self._active_traces:
403
+ return {}
404
+
405
+ trace_metrics = self._active_traces[trace_id]
406
+ readable_counts = {}
407
+
408
+ for level_code, count in trace_metrics["by_level_code_count"].items():
409
+ if count > 0:
410
+ # Convert level code back to LogLevel enum and get name
411
+ for level in LogLevel:
412
+ if level.value == level_code:
413
+ readable_counts[level.name] = count
414
+ break
415
+
416
+ return readable_counts
417
+
418
+ def get_readable_microservice_level_counts(self) -> Dict[str, int]:
419
+ """Get readable level counts for the entire microservice."""
420
+ readable_counts = {}
421
+
422
+ for level_code, count in self._microservice_metrics["by_level_code_count"].items():
423
+ if count > 0:
424
+ # Convert level code back to LogLevel enum and get name
425
+ for level in LogLevel:
426
+ if level.value == level_code:
427
+ readable_counts[level.name] = count
428
+ break
429
+
430
+ return readable_counts
431
+
432
+ def get_microservice_status_summary(self) -> Dict[str, Any]:
433
+ """Get comprehensive status summary using StatusCounts"""
434
+ status_counts = self._microservice_metrics["status_counts"]
435
+
436
+ return {
437
+ "microservice_name": self.microservice_name,
438
+ "microservice_id": self.id,
439
+ "active_traces": self.active_trace_count,
440
+ "total_traces": self._microservice_metrics["total_traces"],
441
+ "completed_traces": self._microservice_metrics["completed_traces"],
442
+ "failed_traces": self._microservice_metrics["failed_traces"],
443
+ "status_breakdown": status_counts.get_count_breakdown(),
444
+ "completion_rate": status_counts.completion_rate,
445
+ "success_rate": status_counts.success_rate,
446
+ "has_issues": status_counts.has_issues,
447
+ "has_failures": status_counts.has_failures,
448
+ "readable_level_counts": self.get_readable_microservice_level_counts()
449
+ }
450
+
451
+ def evaluate_microservice_health(self) -> Dict[str, Any]:
452
+ """Evaluate overall microservice health using status helpers"""
453
+ status_counts = self._microservice_metrics["status_counts"]
454
+
455
+ # Determine overall health status
456
+ if status_counts.has_failures:
457
+ health_status = "UNHEALTHY"
458
+ health_level = LogLevel.ERROR
459
+ elif status_counts.has_issues:
460
+ health_status = "DEGRADED"
461
+ health_level = LogLevel.WARNING
462
+ elif status_counts.has_warnings:
463
+ health_status = "WARNING"
464
+ health_level = LogLevel.WARNING
465
+ else:
466
+ health_status = "HEALTHY"
467
+ health_level = LogLevel.INFO
468
+
469
+ return {
470
+ "health_status": health_status,
471
+ "health_level": health_level.name,
472
+ "summary": status_counts.get_summary(),
473
+ "metrics": self.get_microservice_status_summary()
474
+ }
475
+
476
+ def log_health_check(self) -> None:
477
+ """Log a health check using the status evaluation"""
478
+ health = self.evaluate_microservice_health()
479
+
480
+ health_log = StructLog(
481
+ level=LogLevel[health["health_level"]],
482
+ description=f"Microservice health check: {health['health_status']}",
483
+ resource=AbstractResource.MICROSERVMON,
484
+ action=Action.VALIDATE,
485
+ progress_status=ProgressStatus.DONE,
486
+ note=health["summary"]
487
+ )
488
+ self.log(health_log)
489
+
490
+ # Legacy compatibility methods
491
+ def start(self, description: Optional[str] = None) -> None:
492
+ """Legacy method for backward compatibility."""
493
+ trace_id = self.start_trace(description)
494
+
495
+ def end(self, force_status: Optional[ProgressStatus] = None) -> Dict[str, Any]:
496
+ """Legacy method for backward compatibility."""
497
+ # Find the most recent trace or create a dummy one
498
+ if self._active_traces:
499
+ trace_id = max(self._active_traces.keys())
500
+ self.end_trace(trace_id, force_status)
501
+ # Return microservice metrics for legacy compatibility
502
+ return self.microservice_metrics
503
+ return {}
504
+
505
+ @property
506
+ def metrics(self) -> Dict[str, Any]:
507
+ """Legacy property for backward compatibility."""
508
+ return self.microservice_metrics
509
+
510
+ def _write_log_to_logger(self, log: StructLog):
511
+ """Write structured log to the logger."""
512
+ log_dict = log.to_dict(
513
+ max_field_len=self._max_log_field_len,
514
+ byte_size_limit=self._max_log_dict_byte_size,
515
+ exclude_none=self._exclude_none_from_logs
516
+ )
517
+
518
+ # Write to logger based on level
519
+ if log.level.value >= LogLevel.ERROR.value:
520
+ self._logger.error(log_dict)
521
+ elif log.level.value >= LogLevel.WARNING.value:
522
+ self._logger.warning(log_dict)
523
+ elif log.level.value >= LogLevel.INFO.value:
524
+ self._logger.info(log_dict)
525
+ else:
526
+ self._logger.debug(log_dict)