ipulse-shared-core-ftredge 18.0.1-py3-none-any.whl → 19.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ipulse-shared-core-ftredge might be problematic.

Files changed (34)
  1. ipulse_shared_core_ftredge/__init__.py +1 -12
  2. ipulse_shared_core_ftredge/exceptions/__init__.py +47 -0
  3. ipulse_shared_core_ftredge/exceptions/user_exceptions.py +219 -0
  4. ipulse_shared_core_ftredge/models/__init__.py +0 -2
  5. ipulse_shared_core_ftredge/models/base_data_model.py +6 -6
  6. ipulse_shared_core_ftredge/models/user_auth.py +59 -4
  7. ipulse_shared_core_ftredge/models/user_profile.py +41 -7
  8. ipulse_shared_core_ftredge/models/user_status.py +44 -138
  9. ipulse_shared_core_ftredge/monitoring/__init__.py +5 -0
  10. ipulse_shared_core_ftredge/monitoring/microservmon.py +483 -0
  11. ipulse_shared_core_ftredge/services/__init__.py +21 -14
  12. ipulse_shared_core_ftredge/services/base/__init__.py +12 -0
  13. ipulse_shared_core_ftredge/services/base/base_firestore_service.py +520 -0
  14. ipulse_shared_core_ftredge/services/cache_aware_firestore_service.py +44 -8
  15. ipulse_shared_core_ftredge/services/charging_service.py +1 -1
  16. ipulse_shared_core_ftredge/services/user/__init__.py +37 -0
  17. ipulse_shared_core_ftredge/services/user/iam_management_operations.py +326 -0
  18. ipulse_shared_core_ftredge/services/user/subscription_management_operations.py +384 -0
  19. ipulse_shared_core_ftredge/services/user/user_account_operations.py +479 -0
  20. ipulse_shared_core_ftredge/services/user/user_auth_operations.py +305 -0
  21. ipulse_shared_core_ftredge/services/user/user_core_service.py +651 -0
  22. ipulse_shared_core_ftredge/services/user/user_holistic_operations.py +436 -0
  23. {ipulse_shared_core_ftredge-18.0.1.dist-info → ipulse_shared_core_ftredge-19.0.1.dist-info}/METADATA +1 -1
  24. ipulse_shared_core_ftredge-19.0.1.dist-info/RECORD +41 -0
  25. ipulse_shared_core_ftredge/models/organization_profile.py +0 -96
  26. ipulse_shared_core_ftredge/models/user_profile_update.py +0 -39
  27. ipulse_shared_core_ftredge/services/base_firestore_service.py +0 -249
  28. ipulse_shared_core_ftredge/services/fastapiservicemon.py +0 -140
  29. ipulse_shared_core_ftredge/services/servicemon.py +0 -240
  30. ipulse_shared_core_ftredge-18.0.1.dist-info/RECORD +0 -33
  31. ipulse_shared_core_ftredge/{services/base_service_exceptions.py → exceptions/base_exceptions.py} +1 -1
  32. {ipulse_shared_core_ftredge-18.0.1.dist-info → ipulse_shared_core_ftredge-19.0.1.dist-info}/WHEEL +0 -0
  33. {ipulse_shared_core_ftredge-18.0.1.dist-info → ipulse_shared_core_ftredge-19.0.1.dist-info}/licenses/LICENCE +0 -0
  34. {ipulse_shared_core_ftredge-18.0.1.dist-info → ipulse_shared_core_ftredge-19.0.1.dist-info}/top_level.txt +0 -0
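
The layout changes above move base_firestore_service.py under services/base/, move the service exceptions into a new exceptions package (renamed base_exceptions.py), and replace the servicemon modules with a new monitoring package. A minimal sketch of what that likely means for downstream imports follows; the 19.0.1 paths and re-exported class names are assumptions inferred from the file names in the list, since only the exceptions rename is confirmed by the hunks at the end of this diff.

# Sketch only: assumed import-path updates for consumers of this package.
# 18.0.1 imports (modules removed in this release):
#   from ipulse_shared_core_ftredge.services.base_firestore_service import BaseFirestoreService
#   from ipulse_shared_core_ftredge.services.base_service_exceptions import ResourceNotFoundError
# 19.0.1 equivalents (assumed from the new file locations listed above):
from ipulse_shared_core_ftredge.services.base.base_firestore_service import BaseFirestoreService
from ipulse_shared_core_ftredge.exceptions.base_exceptions import ResourceNotFoundError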
ipulse_shared_core_ftredge/services/base_firestore_service.py (deleted in 19.0.1)
@@ -1,249 +0,0 @@
- """ Base class for Firestore services with common CRUD operations """
- from typing import Dict, Any, List, TypeVar, Generic, Optional
- import logging
- import time
- from datetime import datetime, timezone
- from pydantic import BaseModel
- from google.cloud import firestore
- from .base_service_exceptions import ResourceNotFoundError, ValidationError, ServiceError
-
- T = TypeVar('T', bound=BaseModel)
-
- class BaseFirestoreService(Generic[T]):
-     """Base class for Firestore services with common CRUD operations"""
-
-     def __init__(
-         self,
-         db: firestore.Client,
-         collection_name: str,
-         resource_type: str,
-         logger: logging.Logger,
-         timeout: float = 15.0  # Default to 15 seconds, but allow override
-     ):
-         self.db = db
-         self.collection_name = collection_name
-         self.resource_type = resource_type
-         self.logger = logger
-         self.timeout = timeout  # Store the timeout as an instance attribute
-         self.logger.info(f"Initialized {self.resource_type} service with timeout={timeout}s")
-
-     async def create_document(self, doc_id: str, data: T, creator_uid: str) -> Dict[str, Any]:
-         """Standard create method with audit fields"""
-         try:
-             current_time = datetime.now(timezone.utc)
-             doc_data = data.model_dump(mode='json')
-
-             # Add audit fields
-             doc_data.update({
-                 'created_at': current_time.isoformat(),
-                 'created_by': creator_uid,
-                 'updated_at': current_time.isoformat(),
-                 'updated_by': creator_uid
-             })
-
-             doc_ref = self.db.collection(self.collection_name).document(doc_id)
-             # Apply timeout to the set operation
-             doc_ref.set(doc_data, timeout=self.timeout)
-
-             self.logger.info(f"Created {self.resource_type}: {doc_id}")
-             return doc_data
-
-         except Exception as e:
-             self.logger.error(f"Error creating {self.resource_type}: {e}", exc_info=True)
-             raise ServiceError(
-                 operation=f"creating {self.resource_type}",
-                 error=e,
-                 resource_type=self.resource_type,
-                 resource_id=doc_id
-             ) from e
-
-     async def create_batch_documents(self, documents: List[T], creator_uid: str) -> List[Dict[str, Any]]:
-         """Standard batch create method"""
-         try:
-             batch = self.db.batch()
-             current_time = datetime.now(timezone.utc)
-             created_docs = []
-
-             for doc in documents:
-                 doc_data = doc.model_dump(mode='json')
-                 doc_data.update({
-                     'created_at': current_time.isoformat(),
-                     'created_by': creator_uid,
-                     'updated_at': current_time.isoformat(),
-                     'updated_by': creator_uid
-                 })
-
-                 doc_ref = self.db.collection(self.collection_name).document(doc_data.get('id'))
-                 batch.set(doc_ref, doc_data)
-                 created_docs.append(doc_data)
-
-             # Apply timeout to the commit operation
-             batch.commit(timeout=self.timeout)
-             self.logger.info(f"Created {len(documents)} {self.resource_type}s in batch")
-             return created_docs
-
-         except Exception as e:
-             self.logger.error(f"Error batch creating {self.resource_type}s: {e}", exc_info=True)
-             raise ServiceError(
-                 operation=f"batch creating {self.resource_type}s",
-                 error=e,
-                 resource_type=self.resource_type,
-                 resource_id=doc_data.get('id')
-             ) from e
-
-     async def get_document(self, doc_id: str) -> Dict[str, Any]:
-         """Get a document by ID with standardized error handling"""
-         self.logger.debug(f"Getting {self.resource_type} document: {doc_id} with timeout={self.timeout}s")
-         start_time = time.time()
-
-         try:
-             doc_ref = self.db.collection(self.collection_name).document(doc_id)
-
-             # Apply timeout to the get operation
-             doc = doc_ref.get(timeout=self.timeout)
-
-             elapsed = (time.time() - start_time) * 1000
-             self.logger.debug(f"Firestore get for {doc_id} completed in {elapsed:.2f}ms")
-
-             if not doc.exists:
-                 self.logger.warning(f"Document {doc_id} not found in {self.collection_name}")
-                 raise ResourceNotFoundError(
-                     resource_type=self.resource_type,
-                     resource_id=doc_id,
-                     additional_info={"collection": self.collection_name}
-                 )
-
-             return doc.to_dict()
-
-         except ResourceNotFoundError:
-             raise
-         except Exception as e:
-             elapsed = (time.time() - start_time) * 1000
-             self.logger.error(f"Error getting document {doc_id} after {elapsed:.2f}ms: {str(e)}", exc_info=True)
-             raise ServiceError(
-                 operation=f"retrieving {self.resource_type}",
-                 error=e,
-                 resource_type=self.resource_type,
-                 resource_id=doc_id,
-                 additional_info={"collection": self.collection_name, "timeout": self.timeout}
-             ) from e
-
-     async def update_document(self, doc_id: str, update_data: Dict[str, Any], updater_uid: str) -> Dict[str, Any]:
-         """Standard update method with validation and audit fields"""
-         try:
-             doc_ref = self.db.collection(self.collection_name).document(doc_id)
-
-             # Apply timeout to the get operation
-             if not doc_ref.get(timeout=self.timeout).exists:
-                 raise ResourceNotFoundError(
-                     resource_type=self.resource_type,
-                     resource_id=doc_id,
-                     additional_info={"collection": self.collection_name}
-                 )
-
-             valid_fields = self._validate_update_fields(update_data)
-
-             # Add audit fields
-             valid_fields.update({
-                 'updated_at': datetime.now(timezone.utc).isoformat(),
-                 'updated_by': updater_uid
-             })
-
-             # Apply timeout to the update operation
-             doc_ref.update(valid_fields, timeout=self.timeout)
-             # Apply timeout to the get operation
-             return doc_ref.get(timeout=self.timeout).to_dict()
-
-         except (ResourceNotFoundError, ValidationError):
-             raise
-         except Exception as e:
-             self.logger.error(f"Error updating {self.resource_type}: {e}", exc_info=True)
-             raise ServiceError(
-                 operation=f"updating {self.resource_type}",
-                 error=e,
-                 resource_type=self.resource_type,
-                 resource_id=doc_id
-             ) from e
-
-     async def delete_document(self, doc_id: str) -> None:
-         """Standard delete method"""
-         try:
-             doc_ref = self.db.collection(self.collection_name).document(doc_id)
-             # Apply timeout to the get operation
-             if not doc_ref.get(timeout=self.timeout).exists:
-                 raise ResourceNotFoundError(
-                     resource_type=self.resource_type,
-                     resource_id=doc_id
-                 )
-
-             # Apply timeout to the delete operation
-             doc_ref.delete(timeout=self.timeout)
-             self.logger.info(f"Deleted {self.resource_type}: {doc_id}")
-
-         except ResourceNotFoundError:
-             raise
-         except Exception as e:
-             self.logger.error(f"Error deleting {self.resource_type}: {e}", exc_info=True)
-             raise ServiceError(
-                 operation=f"deleting {self.resource_type}",
-                 error=e,
-                 resource_type=self.resource_type,
-                 resource_id=doc_id
-             ) from e
-
-     # Add query method with timeout
-     async def query_documents(
-         self,
-         filters: Optional[List[tuple]] = None,
-         limit: Optional[int] = None,
-         order_by: Optional[tuple] = None
-     ) -> List[Dict[str, Any]]:
-         """Query documents with filters, limit, and ordering"""
-         try:
-             # Start with the collection reference
-             query = self.db.collection(self.collection_name)
-
-             # Apply filters if provided
-             if filters:
-                 for field, op, value in filters:
-                     query = query.where(field=field, op_string=op, value=value)
-
-             # Apply ordering if provided
-             if order_by:
-                 field, direction = order_by
-                 query = query.order_by(field, direction=direction)
-
-             # Apply limit if provided
-             if limit:
-                 query = query.limit(limit)
-
-             # Execute query with timeout
-             docs = query.stream(timeout=self.timeout)
-             return [doc.to_dict() for doc in docs]
-
-         except Exception as e:
-             self.logger.error(f"Error querying {self.resource_type}: {e}", exc_info=True)
-             raise ServiceError(
-                 operation=f"querying {self.resource_type}",
-                 error=e,
-                 resource_type=self.resource_type
-             ) from e
-
-     def _validate_update_fields(self, update_data: Dict[str, Any]) -> Dict[str, Any]:
-         """Centralized update fields validation"""
-         if not isinstance(update_data, dict):
-             update_data = update_data.model_dump(exclude_unset=True)
-
-         valid_fields = {
-             k: v for k, v in update_data.items()
-             if v is not None and not (isinstance(v, (list, dict, set)) and len(v) == 0)
-         }
-
-         if not valid_fields:
-             raise ValidationError(
-                 resource_type=self.resource_type,
-                 detail="No valid fields to update",
-                 resource_id=None
-             )
-
-         return valid_fields
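
For context, here is a minimal usage sketch of the removed BaseFirestoreService as it existed in 18.0.1. The model, collection name, and document values are hypothetical, running it requires Google Cloud credentials, and the same class appears to move under services/base/ in 19.0.1 according to the file list above.

# Sketch only: subclass-free use of the 18.0.1 BaseFirestoreService (hypothetical model and data).
import asyncio
import logging

from google.cloud import firestore
from pydantic import BaseModel

from ipulse_shared_core_ftredge.services.base_firestore_service import BaseFirestoreService


class UserNote(BaseModel):
    id: str
    text: str


async def main() -> None:
    # The constructor arguments mirror the signature shown in the deleted file above.
    service = BaseFirestoreService[UserNote](
        db=firestore.Client(),
        collection_name="user_notes",
        resource_type="user_note",
        logger=logging.getLogger("user_note_service"),
        timeout=10.0,  # overrides the 15 s default
    )
    created = await service.create_document(
        "note-1", UserNote(id="note-1", text="hello"), creator_uid="uid-123"
    )
    fetched = await service.get_document("note-1")
    print(created["created_by"], fetched["text"])


if __name__ == "__main__":
    asyncio.run(main())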
ipulse_shared_core_ftredge/services/fastapiservicemon.py (deleted in 19.0.1)
@@ -1,140 +0,0 @@
- """ FastAPI ServiceMon"""
- import logging
- import time
- from fastapi import Request
- from ipulse_shared_base_ftredge import DataResource, Action, ProgressStatus, LogLevel
- from starlette.middleware.base import BaseHTTPMiddleware
- from . import Servicemon
-
-
- class FastAPIServiceMon(Servicemon):
-     """
-     Extension of Servicemon designed specifically for FastAPI applications.
-     Adds integration with FastAPI request/response lifecycle.
-     """
-
-     @staticmethod
-     def get_fastapi_middleware():
-         """
-         Creates a FastAPI middleware class that uses ServiceMon for request logging.
-
-         Returns:
-             A middleware class that can be registered with FastAPI
-         """
-
-
-         class ServiceMonMiddleware(BaseHTTPMiddleware):
-             """
-             Middleware class for integrating ServiceMon into FastAPI request/response lifecycle.
-             """
-             async def dispatch(self, request: Request, call_next):
-                 # Create ServiceMon instance
-                 logger_name = f"{request.app.state.env_prefix}__dp_core_api_live__apilogger"
-                 logger = logging.getLogger(logger_name)
-
-                 path = request.url.path
-                 method = request.method
-
-                 # Skip monitoring for certain paths
-                 skip_paths = ["/health", "/metrics", "/docs", "/redoc", "/openapi.json"]
-                 if any(path.startswith(skip_p) for skip_p in skip_paths):
-                     return await call_next(request)
-
-                 # Initialize ServiceMon
-                 servicemon = Servicemon(
-                     logger=logger,
-                     base_context=f"API: {path}\nMethod: {method}",
-                     service_name=f"API_{method}_{path.replace('/', '_')}"
-                 )
-
-                 # Start monitoring
-                 client_ip = request.client.host if request.client else "unknown"
-                 user_agent = request.headers.get("user-agent", "unknown")
-                 servicemon.start(f"API Request {method} {path}")
-
-
-                 # Add request info
-                 servicemon.log(
-                     level=LogLevel.INFO,
-                     description=f"Request received for {method} {path}. Client IP: {client_ip}. User Agent: {user_agent}",
-                     resource=DataResource.API_INTERNAL,
-                     action=Action.EXECUTE,
-                     progress_status=ProgressStatus.STARTED,
-                 )
-
-                 # Process the request and catch any errors
-                 try:
-                     # Store ServiceMon in request state for handlers to access
-                     request.state.svcmon = servicemon
-
-                     # Process request
-                     start_time = time.time()
-                     response = await call_next(request)
-                     process_time = int((time.time() - start_time) * 1000)
-
-                     # Log response
-                     status_code = response.status_code
-                     progress_status = (
-                         ProgressStatus.DONE
-                         if 200 <= status_code < 300
-                         else ProgressStatus.FINISHED_WITH_ISSUES
-                     )
-
-                     log_level = (
-                         LogLevel.ERROR if status_code >= 500
-                         else LogLevel.WARNING if status_code >= 400
-                         else LogLevel.INFO
-                     )
-
-                     servicemon.log(
-                         level=log_level,
-                         description=f"Response sent: {status_code} in {process_time}ms for {method} {path}",
-                         resource=DataResource.API_INTERNAL,
-                         action=Action.EXECUTE,
-                         progress_status=progress_status
-
-                     )
-
-                     # Finalize monitoring
-                     servicemon.end(status=progress_status)
-                     return response
-
-                 except Exception as exc:
-                     # Log error and re-raise
-                     servicemon.log(
-                         level=LogLevel.ERROR,
-                         description=f"Error processing request: {exc}",
-                         resource=DataResource.API_INTERNAL,
-                         action=Action.EXECUTE,
-                         progress_status=ProgressStatus.FAILED
-                     )
-
-                     servicemon.end(status=ProgressStatus.FAILED)
-                     raise
-
-         return ServiceMonMiddleware
-
-     # @staticmethod
-     # def setup_fastapi(app):
-     #     """
-     #     Configure a FastAPI application with ServiceMon integration.
-
-     #     Args:
-     #         app: The FastAPI application instance
-     #     """
-     #     from fastapi import FastAPI
-
-     #     if not isinstance(app, FastAPI):
-     #         raise TypeError("Expected FastAPI application instance")
-
-     #     # Register middleware
-     #     app.add_middleware(FastAPIServiceMon.get_fastapi_middleware())
-
-     #     # Add dependency for route handlers
-     #     from fastapi import Depends, Request
-
-     #     async def get_servicemon(request: Request):
-     #         """Dependency for accessing the current ServiceMon instance."""
-     #         return getattr(request.state, "svcmon", None)
-
-     #     app.dependency_overrides[FastAPIServiceMon] = get_servicemon
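
A usage sketch for the removed FastAPI integration, modeled on the commented-out setup_fastapi() helper above; the env_prefix value and the /ping route are hypothetical, and dispatch() expects request.app.state.env_prefix to be set before requests arrive.

# Sketch only: wiring the removed FastAPIServiceMon middleware into a FastAPI app (18.0.1 API).
from fastapi import FastAPI, Request

from ipulse_shared_core_ftredge.services.fastapiservicemon import FastAPIServiceMon

app = FastAPI()
app.state.env_prefix = "dev"  # dispatch() builds its logger name from this attribute (hypothetical value)

# get_fastapi_middleware() returns a BaseHTTPMiddleware subclass, so the class itself is registered
app.add_middleware(FastAPIServiceMon.get_fastapi_middleware())


@app.get("/ping")
async def ping(request: Request):
    # The middleware stores the monitor on request.state.svcmon for handlers that want extra logging
    svcmon = getattr(request.state, "svcmon", None)
    if svcmon is not None:
        with svcmon.context("ping_handler"):
            return {"ok": True}
    return {"ok": True}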
ipulse_shared_core_ftredge/services/servicemon.py (deleted in 19.0.1)
@@ -1,240 +0,0 @@
- """
- ServiceMon - Lightweight monitoring for service functions and API endpoints
- """
- import uuid
- import time
- from datetime import datetime, timezone
- from typing import Dict, Any, Optional
- from contextlib import contextmanager
- from ipulse_shared_base_ftredge import (LogLevel, AbstractResource,
-                                         ProgressStatus, Action, Resource,
-                                         Alert, StructLog)
-
- class Servicemon:
-     """
-     ServiceMon is a lightweight version of Pipelinemon designed specifically for monitoring
-     service functions like Cloud Functions and API endpoints.
-
-     It provides:
-     1. Structured logging with context tracking
-     2. Performance metrics capture
-     3. Service health monitoring
-     4. Integration with FastAPI request/response cycle
-     """
-
-     def __init__(self, logger,
-                  base_context: str,
-                  service_name: str):
-         """
-         Initialize ServiceMon with basic configuration.
-
-         Args:
-             logger: The logger instance to use for logging
-             base_context: Base context information for all logs
-             service_name: Name of the service being monitored
-         """
-         # Set up execution tracking details
-         self._start_time = None
-         self._service_name = service_name
-
-         timestamp = datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')
-         uuid_suffix = str(uuid.uuid4())[:8]  # Take first 8 chars of UUID
-         self._id = f"{timestamp}_{uuid_suffix}"
-
-         # Set up context handling
-         self._base_context = base_context
-         self._context_stack = []
-
-         # Configure logging
-         self._logger = logger
-
-         # Metrics tracking
-         self._metrics = {
-             "status": ProgressStatus.NOT_STARTED.name,
-             "errors": 0,
-             "warnings": 0,
-             "start_time": None,
-             "end_time": None,
-             "duration_ms": None,
-         }
-
-     @property
-     def id(self) -> str:
-         """Get the unique ID for this service execution."""
-         return self._id
-
-     @property
-     def base_context(self) -> str:
-         """Get the base context for this service execution."""
-         return self._base_context
-
-     @property
-     def service_name(self) -> str:
-         """Get the service name being monitored."""
-         return self._service_name
-
-     @property
-     def metrics(self) -> Dict[str, Any]:
-         """Get the current service metrics."""
-         return self._metrics.copy()
-
-     @property
-     def current_context(self) -> str:
-         """Get the current context stack as a string."""
-         return " >> ".join(self._context_stack) if self._context_stack else "root"
-
-     @contextmanager
-     def context(self, context_name: str):
-         """
-         Context manager for tracking execution context.
-
-         Args:
-             context_name: The name of the current execution context
-         """
-         self.push_context(context_name)
-         try:
-             yield
-         finally:
-             self.pop_context()
-
-     def push_context(self, context: str):
-         """Add a context level to the stack."""
-         self._context_stack.append(context)
-
-     def pop_context(self):
-         """Remove the most recent context from the stack."""
-         if self._context_stack:
-             return self._context_stack.pop()
-
-     def start(self, description: Optional[str] = None) -> None:
-         """
-         Start monitoring a service execution.
-
-         Args:
-             description: Optional description of what's being executed
-         """
-         self._start_time = time.time()
-         self._metrics["start_time"] = datetime.now(timezone.utc).isoformat()
-         self._metrics["status"] = ProgressStatus.IN_PROGRESS.name
-
-         # Log the start event
-         msg = description if description else f"Starting {self.service_name}"
-         self.log(level=LogLevel.INFO, description=msg, resource=AbstractResource.SERVICEMON, action=Action.EXECUTE,
-                  progress_status=ProgressStatus.IN_PROGRESS)
-
-     def end(self, status: ProgressStatus = ProgressStatus.DONE) -> Dict[str, Any]:
-         """
-         End monitoring and record final metrics.
-
-         Args:
-             status: The final status of the service execution
-
-         Returns:
-             Dict containing metrics summary
-         """
-         # Calculate duration
-         end_time = time.time()
-         if self._start_time:
-             duration_ms = int((end_time - self._start_time) * 1000)
-             self._metrics["duration_ms"] = duration_ms
-
-         # Update metrics
-         self._metrics["end_time"] = datetime.now(timezone.utc).isoformat()
-         self._metrics["status"] = status.name
-
-         # Determine log level based on metrics
-         if self._metrics["errors"] > 0:
-             level = LogLevel.ERROR
-             if status == ProgressStatus.DONE:
-                 status = ProgressStatus.FINISHED_WITH_ISSUES
-         elif self._metrics["warnings"] > 0:
-             level = LogLevel.WARNING
-             if status == ProgressStatus.DONE:
-                 status = ProgressStatus.DONE_WITH_WARNINGS
-         else:
-             level = LogLevel.INFO
-
-         # Prepare summary message
-         summary_msg = (
-             f"Service {self.service_name} completed with status {status.name}. "
-             f"Duration: {self._metrics['duration_ms']}ms. "
-             f"Errors: {self._metrics['errors']}, Warnings: {self._metrics['warnings']}"
-         )
-
-         # Log the completion
-         self.log(
-             level=level,
-             description=summary_msg,
-             resource=AbstractResource.SERVICEMON,
-             action=Action.EXECUTE,
-             progress_status=status,
-         )
-
-         return self._metrics
-
-     def log(self,
-             level: LogLevel,
-             description: str,
-             resource: Optional[Resource] = None,
-             source: Optional[str] = None,
-             destination: Optional[str] = None,
-             action: Optional[Action] = None,
-             progress_status: Optional[ProgressStatus] = None,
-             alert: Optional[Alert] = None,
-             e: Optional[Exception] = None,
-             systems_impacted: Optional[str] = None,
-             notes: Optional[str] = None,
-             **kwargs) -> None:
-         """
-         Log a message with structured context.
-
-         Args:
-             level: Log level
-             description: Log message
-             resource: Resource being accessed
-             action: Action being performed
-             progress_status: Current progress status
-             alert: Alert type if applicable
-             e: Exception if logging an error
-             **kwargs: Additional fields to include in the log
-         """
-         # Update metrics
-         if level in (LogLevel.ERROR, LogLevel.CRITICAL):
-             self._metrics["errors"] += 1
-         elif level == LogLevel.WARNING:
-             self._metrics["warnings"] += 1
-
-
-         formatted_notes = f"{notes} ;elapsed_ms: {int((time.time() - self._start_time) * 1000)} " + str(kwargs)
-         # Create structured log
-         log = StructLog(
-             level=level,
-             resource=resource,
-             action=action,
-             progress_status=progress_status,
-             alert=alert,
-             e=e,
-             source=source,
-             destination=destination,
-             description=description,
-             collector_id=self.id,
-             base_context=self.base_context,
-             context=self.current_context,
-             systems_impacted=systems_impacted,
-             note=formatted_notes,
-             **kwargs
-         )
-
-         # Add service-specific fields
-         log_dict = log.to_dict()
-
-         # Write to logger
-         if level.value >= LogLevel.ERROR.value:
-             self._logger.error(log_dict)
-         elif level.value >= LogLevel.WARNING.value:
-             self._logger.warning(log_dict)
-         elif level.value >= LogLevel.INFO.value:
-             self._logger.info(log_dict)
-         else:
-             self._logger.debug(log_dict)
-
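
A direct-usage sketch of the removed Servicemon class, based on the API shown above; the logger name, context label, and log message are hypothetical, and 19.0.1 appears to replace this module with monitoring/microservmon.py per the file list.

# Sketch only: driving the removed 18.0.1 Servicemon directly (hypothetical names and messages).
import logging

from ipulse_shared_base_ftredge import AbstractResource, Action, LogLevel, ProgressStatus

from ipulse_shared_core_ftredge.services.servicemon import Servicemon

logging.basicConfig(level=logging.INFO)
mon = Servicemon(
    logger=logging.getLogger("billing_job"),
    base_context="Cloud Function: billing_job",
    service_name="billing_job",
)

mon.start("Nightly billing run")  # records start_time and sets status to IN_PROGRESS
with mon.context("load_invoices"):  # pushed context appears in each StructLog entry
    mon.log(
        level=LogLevel.INFO,
        description="Loaded 42 invoices",
        resource=AbstractResource.SERVICEMON,
        action=Action.EXECUTE,
        progress_status=ProgressStatus.IN_PROGRESS,
    )
metrics = mon.end(status=ProgressStatus.DONE)  # returns the metrics dict shown above
print(metrics["duration_ms"], metrics["status"])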
ipulse_shared_core_ftredge-18.0.1.dist-info/RECORD (deleted in 19.0.1)
@@ -1,33 +0,0 @@
- ipulse_shared_core_ftredge/__init__.py,sha256=b7hQEEfgIhLyLycNaM5vrCNVfiCrFoUkVdAGCP0nsbM,516
- ipulse_shared_core_ftredge/cache/__init__.py,sha256=i2fPojmZiBwAoY5ovnnnME9USl4bi8MRPYkAgEfACfI,136
- ipulse_shared_core_ftredge/cache/shared_cache.py,sha256=NMHSQyHjhn11IB3cQjw7ctV18CXBT1X6FC2UvURtBy8,12957
- ipulse_shared_core_ftredge/dependencies/__init__.py,sha256=HGsR8HUguKTfjz_BorCILS4izX8CAjG-apE0kIPE0Yo,68
- ipulse_shared_core_ftredge/dependencies/auth_firebase_token_validation.py,sha256=EFWyhoVOI0tGYOWqN5St4JNIy4cMwpxeBhKdjOwEfbg,1888
- ipulse_shared_core_ftredge/dependencies/auth_protected_router.py,sha256=em5D5tE7OkgZmuCtYCKuUAnIZCgRJhCF8Ye5QmtGWlk,1807
- ipulse_shared_core_ftredge/dependencies/authz_for_apis.py,sha256=6mJwk_xJILbnvPDfnxXyCebvP9TymvK0NaEDT8KBU-A,15826
- ipulse_shared_core_ftredge/dependencies/firestore_client.py,sha256=VbTb121nsc9EZPd1RDEsHBLW5pIiVw6Wdo2JFL4afMg,714
- ipulse_shared_core_ftredge/models/__init__.py,sha256=xGbDLElTRbUwcQaOTwz5Myxv5hAP2S3xSTMgO2hN-Ko,456
- ipulse_shared_core_ftredge/models/base_api_response.py,sha256=OwuWI2PsMSLDkFt643u35ZhW5AHFEMMAGnGprmUO0fA,2380
- ipulse_shared_core_ftredge/models/base_data_model.py,sha256=feG_K4i1xX_J_h9QrdxshmFvUsS1_hWR8BBoh0DRxoM,2543
- ipulse_shared_core_ftredge/models/organization_profile.py,sha256=OnjsSVcp_LSB65F9Tl9udwNgqMg7gjSpv38eArpVXPc,3668
- ipulse_shared_core_ftredge/models/subscription.py,sha256=bu6BtyDQ4jDkK3PLY97dZ_A3cmjzZahTkuaFOFybdxI,6892
- ipulse_shared_core_ftredge/models/user_auth.py,sha256=YgCeK0uJ-JOkPavwzogl4wGC3RpA8PVfl-5MPS4Kxhk,432
- ipulse_shared_core_ftredge/models/user_profile.py,sha256=5cTTZa7pMkgKCsLgTPpvz_aPn-ZyQcJ3xSEtu3jq3HE,4138
- ipulse_shared_core_ftredge/models/user_profile_update.py,sha256=3BqAAqnVKXPKhAcfV_aOERe8GyIkX0NU_LJcQa02aLw,1319
- ipulse_shared_core_ftredge/models/user_status.py,sha256=rAx8l5GrB8TN7RvZ1eIMskphRxdYqO1OZ8NnaIxUUW8,23660
- ipulse_shared_core_ftredge/services/__init__.py,sha256=tUUZxdAfth0oqF0bkTrj1FlRJQnZkMrF1r1ryNB-yU4,906
- ipulse_shared_core_ftredge/services/base_firestore_service.py,sha256=n1lymQEFcu6zHkdscNNCNIzTIVmja8cBtNy2yi5vfTE,9817
- ipulse_shared_core_ftredge/services/base_service_exceptions.py,sha256=Bi0neeMY0YncWDeqUavu5JUslkjJ6QcDVRU32Ipjc08,4294
- ipulse_shared_core_ftredge/services/cache_aware_firestore_service.py,sha256=rPaE2gZ05iAo5TKfIqc0yuyiVJqfbd7TQBFhWdUHJNc,7870
- ipulse_shared_core_ftredge/services/charging_processors.py,sha256=8bozatlie8egZFA-IUc2Vh1zjhyTdDqoe5nNgsL_ebM,16170
- ipulse_shared_core_ftredge/services/charging_service.py,sha256=buUR09TWLEmDeo1n3-DxRz9Vc8tLkYP4DTzlyVkHogM,14639
- ipulse_shared_core_ftredge/services/fastapiservicemon.py,sha256=27clTZXH32mbju8o-HLO_8VrmugmpXwHLuX-OOoIAew,5308
- ipulse_shared_core_ftredge/services/servicemon.py,sha256=wWhsLwU1_07emaEyCNziZA1bDQVLxcfvQj0OseTLSTI,7969
- ipulse_shared_core_ftredge/utils/__init__.py,sha256=JnxUb8I2MRjJC7rBPXSrpwBIQDEOku5O9JsiTi3oun8,56
- ipulse_shared_core_ftredge/utils/custom_json_encoder.py,sha256=DblQLD0KOSNDyQ58wQRogBrShIXzPIZUw_oGOBATnJY,1366
- ipulse_shared_core_ftredge/utils/json_encoder.py,sha256=QkcaFneVv3-q-s__Dz4OiUWYnM6jgHDJrDMdPv09RCA,2093
- ipulse_shared_core_ftredge-18.0.1.dist-info/licenses/LICENCE,sha256=YBtYAXNqCCOo9Mr2hfkbSPAM9CeAr2j1VZBSwQTrNwE,1060
- ipulse_shared_core_ftredge-18.0.1.dist-info/METADATA,sha256=SXCSNUI23dl8PQes6lO-5wItHJowquYaqJr4eiWebtM,803
- ipulse_shared_core_ftredge-18.0.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- ipulse_shared_core_ftredge-18.0.1.dist-info/top_level.txt,sha256=8sgYrptpexkA_6_HyGvho26cVFH9kmtGvaK8tHbsGHk,27
- ipulse_shared_core_ftredge-18.0.1.dist-info/RECORD,,
ipulse_shared_core_ftredge/{services/base_service_exceptions.py → exceptions/base_exceptions.py}
@@ -99,6 +99,7 @@ class ResourceNotFoundError(BaseServiceException):
             additional_info=additional_info
         )
 
+
 class AuthorizationError(BaseServiceException):
     def __init__(
         self,
@@ -132,4 +133,3 @@ class ValidationError(BaseServiceException):
             resource_id=resource_id,
             additional_info=additional_info
         )
-