ipulse-shared-core-ftredge 16.0.1__py3-none-any.whl → 19.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ipulse-shared-core-ftredge might be problematic. See the package registry advisory for more details.

Files changed (36)
  1. ipulse_shared_core_ftredge/__init__.py +1 -12
  2. ipulse_shared_core_ftredge/dependencies/authz_for_apis.py +8 -5
  3. ipulse_shared_core_ftredge/exceptions/__init__.py +47 -0
  4. ipulse_shared_core_ftredge/exceptions/user_exceptions.py +219 -0
  5. ipulse_shared_core_ftredge/models/__init__.py +1 -3
  6. ipulse_shared_core_ftredge/models/base_api_response.py +15 -0
  7. ipulse_shared_core_ftredge/models/base_data_model.py +7 -6
  8. ipulse_shared_core_ftredge/models/user_auth.py +59 -4
  9. ipulse_shared_core_ftredge/models/user_profile.py +41 -7
  10. ipulse_shared_core_ftredge/models/user_status.py +44 -138
  11. ipulse_shared_core_ftredge/monitoring/__init__.py +5 -0
  12. ipulse_shared_core_ftredge/monitoring/microservmon.py +483 -0
  13. ipulse_shared_core_ftredge/services/__init__.py +21 -14
  14. ipulse_shared_core_ftredge/services/base/__init__.py +12 -0
  15. ipulse_shared_core_ftredge/services/base/base_firestore_service.py +520 -0
  16. ipulse_shared_core_ftredge/services/cache_aware_firestore_service.py +44 -8
  17. ipulse_shared_core_ftredge/services/charging_service.py +1 -1
  18. ipulse_shared_core_ftredge/services/user/__init__.py +37 -0
  19. ipulse_shared_core_ftredge/services/user/iam_management_operations.py +326 -0
  20. ipulse_shared_core_ftredge/services/user/subscription_management_operations.py +384 -0
  21. ipulse_shared_core_ftredge/services/user/user_account_operations.py +479 -0
  22. ipulse_shared_core_ftredge/services/user/user_auth_operations.py +305 -0
  23. ipulse_shared_core_ftredge/services/user/user_core_service.py +651 -0
  24. ipulse_shared_core_ftredge/services/user/user_holistic_operations.py +436 -0
  25. {ipulse_shared_core_ftredge-16.0.1.dist-info → ipulse_shared_core_ftredge-19.0.1.dist-info}/METADATA +2 -2
  26. ipulse_shared_core_ftredge-19.0.1.dist-info/RECORD +41 -0
  27. {ipulse_shared_core_ftredge-16.0.1.dist-info → ipulse_shared_core_ftredge-19.0.1.dist-info}/WHEEL +1 -1
  28. ipulse_shared_core_ftredge/models/organization_profile.py +0 -96
  29. ipulse_shared_core_ftredge/models/user_profile_update.py +0 -39
  30. ipulse_shared_core_ftredge/services/base_firestore_service.py +0 -249
  31. ipulse_shared_core_ftredge/services/fastapiservicemon.py +0 -140
  32. ipulse_shared_core_ftredge/services/servicemon.py +0 -240
  33. ipulse_shared_core_ftredge-16.0.1.dist-info/RECORD +0 -33
  34. ipulse_shared_core_ftredge/{services/base_service_exceptions.py → exceptions/base_exceptions.py} +1 -1
  35. {ipulse_shared_core_ftredge-16.0.1.dist-info → ipulse_shared_core_ftredge-19.0.1.dist-info}/licenses/LICENCE +0 -0
  36. {ipulse_shared_core_ftredge-16.0.1.dist-info → ipulse_shared_core_ftredge-19.0.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,483 @@
1
+ """
2
+ Microservmon - Lightweight logging collector for microservice requests and functions and API endpoints
3
+ """
4
+ import uuid
5
+ import time
6
+ from datetime import datetime, timezone
7
+ from typing import Dict, Any, Optional
8
+ from contextlib import contextmanager
9
+ from collections import defaultdict
10
+ from ipulse_shared_base_ftredge import (LogLevel, AbstractResource,
11
+ ProgressStatus, Action,
12
+ Alert, StructLog)
13
+ from ipulse_shared_base_ftredge.status import StatusCounts, map_progress_status_to_log_level, eval_statuses
14
+
15
class Microservmon:
    """
    Microservmon is a lightweight version of Pipelinemon designed specifically for monitoring
    microservice events such as HTTP Requests, PubSub Triggers etc within execution environment like
    Cloud Functions, Cloud Run etc.

    It provides:
    1. Structured logging with context tracking
    2. Performance metrics capture per trace
    3. Microservice health monitoring
    4. Integration with FastAPI request/response cycle
    5. Memory-efficient trace lifecycle management
    """

    def __init__(self, logger,
                 base_context: str,
                 microservice_name: str,
                 max_log_field_len: Optional[int] = 8000,
                 max_log_dict_byte_size: Optional[float] = 256 * 1024 * 0.80,
                 exclude_none_from_logs: bool = True):
        """
        Initialize Microservmon with basic configuration.

        Args:
            logger: The logger instance to use for logging
            base_context: Base context information for all logs
            microservice_name: Name of the microservice being monitored
            max_log_field_len: Maximum length for any string field in logs
            max_log_dict_byte_size: Maximum byte size for log dictionary
            exclude_none_from_logs: Whether to exclude None values from log output (default: True)
        """
        # Set up microservice tracking details
        self._microservice_name = microservice_name
        self._microservice_start_time = time.time()

        # Unique, sortable instance id: UTC timestamp + short UUID suffix
        timestamp = datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')
        uuid_suffix = str(uuid.uuid4())[:8]  # Take first 8 chars of UUID
        self._id = f"{timestamp}_{uuid_suffix}"

        # Set up context handling
        self._base_context = base_context
        self._context_stack: list = []

        # Configure logging
        self._logger = logger
        self._max_log_field_len = max_log_field_len
        self._max_log_dict_byte_size = max_log_dict_byte_size
        self._exclude_none_from_logs = exclude_none_from_logs

        # Trace-based tracking: trace_id -> per-trace metrics dict
        self._active_traces: Dict[str, Dict[str, Any]] = {}

        # Microservice-wide metrics (aggregated across all traces)
        self._microservice_metrics: Dict[str, Any] = {
            "total_traces": 0,
            "active_traces": 0,
            "completed_traces": 0,
            "failed_traces": 0,
            "by_event_count": defaultdict(int),
            "by_level_code_count": defaultdict(int),
            "status_counts": StatusCounts()  # StatusCounts for better status tracking
        }

        # Log microservice startup
        self._log_microservice_start()

    def _log_microservice_start(self):
        """Log the microservice instance startup (written directly, not counted in metrics)."""
        startup_log = StructLog(
            level=LogLevel.INFO,
            resource=AbstractResource.MICROSERVMON,
            action=Action.EXECUTE,
            progress_status=ProgressStatus.STARTED,
            description=f"Microservice {self.microservice_name} instance started",
            collector_id=self.id,
            base_context=self.base_context,
            context="microservice_startup"
        )
        self._write_log_to_logger(startup_log)

    @property
    def id(self) -> str:
        """Get the unique ID for this microservice instance."""
        return self._id

    @property
    def base_context(self) -> str:
        """Get the base context for this microservice execution."""
        return self._base_context

    @property
    def microservice_name(self) -> str:
        """Get the microservice name being monitored."""
        return self._microservice_name

    @property
    def microservice_metrics(self) -> Dict[str, Any]:
        """Get a copy of the current microservice-wide metrics (defaultdicts as plain dicts)."""
        metrics = self._microservice_metrics.copy()
        metrics["by_event_count"] = dict(metrics["by_event_count"])
        metrics["by_level_code_count"] = dict(metrics["by_level_code_count"])
        return metrics

    @property
    def active_trace_count(self) -> int:
        """Get count of currently active traces."""
        return len(self._active_traces)

    @property
    def current_context(self) -> str:
        """Get the current context stack as a string."""
        return " >> ".join(self._context_stack) if self._context_stack else "root"

    @contextmanager
    def context(self, context_name: str):
        """
        Context manager for tracking execution context.
        Note: This is for microservice-wide context, not trace-specific.

        Args:
            context_name: The name of the current execution context
        """
        self.push_context(context_name)
        try:
            yield
        finally:
            self.pop_context()

    def push_context(self, context: str):
        """Add a context level to the stack."""
        self._context_stack.append(context)

    def pop_context(self):
        """Remove and return the most recent context from the stack (None if empty)."""
        if self._context_stack:
            return self._context_stack.pop()

    def start_trace(self, description: Optional[str] = None) -> str:
        """
        Start monitoring a new trace (request/event) with auto-generated trace ID.

        Args:
            description: Optional description of the trace

        Returns:
            str: Auto-generated trace_id
        """
        # Auto-generate trace ID (time-of-day with milliseconds + short UUID)
        timestamp = datetime.now(timezone.utc).strftime('%H%M%S_%f')[:-3]  # Include milliseconds
        trace_id = f"trace_{timestamp}_{str(uuid.uuid4())[:6]}"

        start_time = time.time()

        # Initialize trace metrics with StatusCounts
        self._active_traces[trace_id] = {
            "start_time": start_time,
            "status": ProgressStatus.IN_PROGRESS.name,
            "by_event_count": defaultdict(int),
            "by_level_code_count": defaultdict(int),
            "status_counts": StatusCounts()  # Track status progression within trace
        }

        # Add initial status to trace
        self._active_traces[trace_id]["status_counts"].add_status(ProgressStatus.IN_PROGRESS)

        # Update microservice metrics
        self._microservice_metrics["total_traces"] += 1
        self._microservice_metrics["active_traces"] = len(self._active_traces)
        self._microservice_metrics["status_counts"].add_status(ProgressStatus.IN_PROGRESS)

        # Log the trace start using StructLog
        msg = description if description else f"Starting trace {trace_id}"
        start_log = StructLog(
            level=LogLevel.INFO,
            description=msg,
            resource=AbstractResource.MICROSERVICE_TRACE,
            action=Action.EXECUTE,
            progress_status=ProgressStatus.IN_PROGRESS
        )
        self.log(start_log, trace_id)

        return trace_id

    def end_trace(self, trace_id: str, force_status: Optional[ProgressStatus] = None) -> None:
        """
        End monitoring for a trace and record final metrics.
        Status is automatically calculated based on trace metrics unless forced.

        Args:
            trace_id: The trace identifier to end
            force_status: Optional status to force, overriding automatic calculation
        """
        if trace_id not in self._active_traces:
            # Log warning about unknown trace using StructLog
            warning_log = StructLog(
                level=LogLevel.WARNING,
                description=f"Attempted to end unknown trace: {trace_id}",
                resource=AbstractResource.MICROSERVICE_TRACE,
                action=Action.EXECUTE,
                progress_status=ProgressStatus.UNFINISHED,
                alert=Alert.NOT_FOUND
            )
            self.log(warning_log)
            return

        trace_metrics = self._active_traces[trace_id]

        # Calculate duration
        end_time = time.time()
        duration_ms = int((end_time - trace_metrics["start_time"]) * 1000)
        trace_metrics["duration_ms"] = duration_ms
        trace_metrics["end_time"] = end_time

        # Get readable error/warning counts using existing helper functions
        error_count = self._get_error_count_for_trace(trace_id)
        warning_count = self._get_warning_count_for_trace(trace_id)

        # Use status helpers for intelligent status calculation
        if force_status is not None:
            final_status = force_status
            level = map_progress_status_to_log_level(final_status)
        else:
            # Build status list based on log levels for evaluation
            status_list = []
            if error_count > 0:
                status_list.append(ProgressStatus.FINISHED_WITH_ISSUES)
            elif warning_count > 0:
                status_list.append(ProgressStatus.DONE_WITH_WARNINGS)
            else:
                status_list.append(ProgressStatus.DONE)

            # Use eval_statuses for consistent status evaluation
            final_status = eval_statuses(
                status_list,
                fail_or_unfinish_if_any_pending=True,  # Since this is end of trace
                issues_allowed=True
            )
            level = map_progress_status_to_log_level(final_status)

        # Update trace status
        trace_metrics["status"] = final_status.name
        trace_metrics["status_counts"].add_status(final_status)

        # Prepare summary message
        status_source = "FORCED" if force_status is not None else "AUTO"
        summary_msg = (
            f"Trace {trace_id} completed with status {final_status.name} ({status_source}). "
            f"Duration: {duration_ms}ms. "
            f"Errors: {error_count}, Warnings: {warning_count}"
        )

        # Log the completion using StructLog (counted against this trace before cleanup)
        completion_log = StructLog(
            level=level,
            description=summary_msg,
            resource=AbstractResource.MICROSERVMON,
            action=Action.EXECUTE,
            progress_status=final_status
        )
        self.log(completion_log, trace_id)

        # Update microservice-wide metrics using StatusCounts
        self._microservice_metrics["completed_traces"] += 1
        if final_status in ProgressStatus.failure_statuses():
            self._microservice_metrics["failed_traces"] += 1

        # Add final status to microservice status counts
        self._microservice_metrics["status_counts"].add_status(final_status)

        # Aggregate trace metrics to microservice level
        for event, count in trace_metrics["by_event_count"].items():
            self._microservice_metrics["by_event_count"][event] += count
        for level_code, count in trace_metrics["by_level_code_count"].items():
            self._microservice_metrics["by_level_code_count"][level_code] += count

        # Clean up trace data from memory
        del self._active_traces[trace_id]
        self._microservice_metrics["active_traces"] = len(self._active_traces)

    def log(self, log: StructLog, trace_id: Optional[str] = None) -> None:
        """
        Log a StructLog message with trace context.

        Args:
            log: StructLog instance to log
            trace_id: Optional trace ID for trace-specific logging
        """
        # Calculate elapsed time (relative to the trace if known, else to instance start)
        elapsed_ms = None
        if trace_id and trace_id in self._active_traces:
            start_time = self._active_traces[trace_id]["start_time"]
            elapsed_ms = int((time.time() - start_time) * 1000)
        else:
            # Use microservice start time if no trace
            elapsed_ms = int((time.time() - self._microservice_start_time) * 1000)

        # Set microservice-specific context on the log
        log.collector_id = self.id
        log.base_context = self.base_context
        # Context includes trace_id automatically via StructLog
        log.context = f"{self.current_context} >> {trace_id}" if trace_id else self.current_context
        log.trace_id = trace_id

        # Append elapsed time to existing notes
        existing_note = log.note or ""
        elapsed_note = f"elapsed_ms: {elapsed_ms}"
        log.note = f"{existing_note}; {elapsed_note}" if existing_note else elapsed_note

        # Update metrics for the trace or microservice
        self._update_counts(log, trace_id)

        # Write to logger
        self._write_log_to_logger(log)

    def _update_counts(self, log: StructLog, trace_id: Optional[str] = None):
        """Update event/level counters.

        Counts attach to the active trace when trace_id resolves; trace counts
        are rolled up into microservice metrics in end_trace(). Logs without an
        active trace are counted directly at the microservice level — previously
        such logs were silently dropped from all counters.
        """
        event_tuple = log.getEvent()
        level = log.level

        if trace_id and trace_id in self._active_traces:
            # Trace-scoped: aggregated to microservice level in end_trace()
            trace_metrics = self._active_traces[trace_id]
            trace_metrics["by_event_count"][event_tuple] += 1
            trace_metrics["by_level_code_count"][level.value] += 1
        else:
            # No (valid) trace: count at microservice level so the log is not lost
            self._microservice_metrics["by_event_count"][event_tuple] += 1
            self._microservice_metrics["by_level_code_count"][level.value] += 1

    def _get_error_count_for_trace(self, trace_id: str) -> int:
        """Get total error count (ERROR + CRITICAL) for a specific trace."""
        if trace_id not in self._active_traces:
            return 0

        trace_metrics = self._active_traces[trace_id]
        return (trace_metrics["by_level_code_count"].get(LogLevel.ERROR.value, 0) +
                trace_metrics["by_level_code_count"].get(LogLevel.CRITICAL.value, 0))

    def _get_warning_count_for_trace(self, trace_id: str) -> int:
        """Get warning count for a specific trace."""
        if trace_id not in self._active_traces:
            return 0

        trace_metrics = self._active_traces[trace_id]
        return trace_metrics["by_level_code_count"].get(LogLevel.WARNING.value, 0)

    @staticmethod
    def _readable_level_counts(by_level_code_count: Dict[Any, int]) -> Dict[str, int]:
        """Convert a {LogLevel.value: count} mapping to {LogLevel.name: count}, skipping zeros."""
        readable_counts: Dict[str, int] = {}
        for level_code, count in by_level_code_count.items():
            if count > 0:
                # Convert level code back to LogLevel enum and get name
                for level in LogLevel:
                    if level.value == level_code:
                        readable_counts[level.name] = count
                        break
        return readable_counts

    def get_readable_level_counts_for_trace(self, trace_id: str) -> Dict[str, int]:
        """Get readable level counts for a specific trace."""
        if trace_id not in self._active_traces:
            return {}
        return self._readable_level_counts(self._active_traces[trace_id]["by_level_code_count"])

    def get_readable_microservice_level_counts(self) -> Dict[str, int]:
        """Get readable level counts for the entire microservice."""
        return self._readable_level_counts(self._microservice_metrics["by_level_code_count"])

    def get_microservice_status_summary(self) -> Dict[str, Any]:
        """Get comprehensive status summary using StatusCounts"""
        status_counts = self._microservice_metrics["status_counts"]

        return {
            "microservice_name": self.microservice_name,
            "microservice_id": self.id,
            "active_traces": self.active_trace_count,
            "total_traces": self._microservice_metrics["total_traces"],
            "completed_traces": self._microservice_metrics["completed_traces"],
            "failed_traces": self._microservice_metrics["failed_traces"],
            "status_breakdown": status_counts.get_count_breakdown(),
            "completion_rate": status_counts.completion_rate,
            "success_rate": status_counts.success_rate,
            "has_issues": status_counts.has_issues,
            "has_failures": status_counts.has_failures,
            "readable_level_counts": self.get_readable_microservice_level_counts()
        }

    def evaluate_microservice_health(self) -> Dict[str, Any]:
        """Evaluate overall microservice health using status helpers"""
        status_counts = self._microservice_metrics["status_counts"]

        # Determine overall health status (ordered from worst to best)
        if status_counts.has_failures:
            health_status = "UNHEALTHY"
            health_level = LogLevel.ERROR
        elif status_counts.has_issues:
            health_status = "DEGRADED"
            health_level = LogLevel.WARNING
        elif status_counts.has_warnings:
            health_status = "WARNING"
            health_level = LogLevel.WARNING
        else:
            health_status = "HEALTHY"
            health_level = LogLevel.INFO

        return {
            "health_status": health_status,
            "health_level": health_level.name,
            "summary": status_counts.get_summary(),
            "metrics": self.get_microservice_status_summary()
        }

    def log_health_check(self) -> None:
        """Log a health check using the status evaluation"""
        health = self.evaluate_microservice_health()

        health_log = StructLog(
            level=LogLevel[health["health_level"]],
            description=f"Microservice health check: {health['health_status']}",
            resource=AbstractResource.MICROSERVMON,
            action=Action.VALIDATE,
            progress_status=ProgressStatus.DONE,
            note=health["summary"]
        )
        self.log(health_log)

    # Legacy compatibility methods
    def start(self, description: Optional[str] = None) -> None:
        """Legacy method for backward compatibility (discards the generated trace_id)."""
        self.start_trace(description)

    def end(self, force_status: Optional[ProgressStatus] = None) -> Dict[str, Any]:
        """Legacy method for backward compatibility.

        Ends the lexicographically-latest active trace (trace ids are
        time-prefixed, so this is normally the most recent) and returns
        microservice metrics; returns {} when no trace is active.
        """
        if self._active_traces:
            trace_id = max(self._active_traces.keys())
            self.end_trace(trace_id, force_status)
            # Return microservice metrics for legacy compatibility
            return self.microservice_metrics
        return {}

    @property
    def metrics(self) -> Dict[str, Any]:
        """Legacy property for backward compatibility."""
        return self.microservice_metrics

    def _write_log_to_logger(self, log: StructLog):
        """Write structured log to the logger, routed by severity level."""
        log_dict = log.to_dict(
            max_field_len=self._max_log_field_len,
            byte_size_limit=self._max_log_dict_byte_size,
            exclude_none=self._exclude_none_from_logs
        )

        # Write to logger based on level
        if log.level.value >= LogLevel.ERROR.value:
            self._logger.error(log_dict)
        elif log.level.value >= LogLevel.WARNING.value:
            self._logger.warning(log_dict)
        elif log.level.value >= LogLevel.INFO.value:
            self._logger.info(log_dict)
        else:
            self._logger.debug(log_dict)
@@ -1,18 +1,25 @@
1
1
  """Service utilities for shared core."""
2
- # Import existing components
3
- from ipulse_shared_core_ftredge.services.base_service_exceptions import (
4
- BaseServiceException, ServiceError, ValidationError, ResourceNotFoundError, AuthorizationError
5
- )
6
- from ipulse_shared_core_ftredge.services.servicemon import Servicemon
7
- from ipulse_shared_core_ftredge.services.base_firestore_service import BaseFirestoreService
8
- from ipulse_shared_core_ftredge.services.cache_aware_firestore_service import CacheAwareFirestoreService
9
2
 
10
- from ipulse_shared_core_ftredge.services.charging_processors import (ChargingProcessor)
11
- from ipulse_shared_core_ftredge.services.charging_service import ChargingService
12
3
 
13
- __all__ = [
14
- 'AuthorizationError', 'BaseServiceException', 'ServiceError', 'ValidationError',
15
- 'ResourceNotFoundError', 'BaseFirestoreService',
16
- 'CacheAwareFirestoreService', 'Servicemon',
17
- 'ChargingProcessor'
4
+ # Import from base services
5
+ from .base import BaseFirestoreService
6
+ from .cache_aware_firestore_service import CacheAwareFirestoreService
7
+
8
+ from .charging_processors import ChargingProcessor
9
+ from .charging_service import ChargingService
10
+
11
+ # Import user services from the user package
12
+ from .user import (
13
+ UserCoreService,
14
+ UserAccountOperations, SubscriptionManagementOperations,
15
+ IAMManagementOperations, UserAuthOperations, UserHolisticOperations,
16
+ SubscriptionPlanDocument, UserTypeDefaultsDocument
17
+ )
18
+
19
+ __all__ = [ 'BaseFirestoreService',
20
+ 'CacheAwareFirestoreService',
21
+ 'ChargingProcessor', 'ChargingService', 'UserCoreService',
22
+ 'UserAccountOperations', 'SubscriptionManagementOperations',
23
+ 'IAMManagementOperations', 'UserAuthOperations', 'UserHolisticOperations',
24
+ 'SubscriptionPlanDocument', 'UserTypeDefaultsDocument'
18
25
  ]
@@ -0,0 +1,12 @@
1
+ """
2
+ Base service classes for ipulse_shared_core_ftredge
3
+
4
+ This module provides base service classes without importing any concrete services,
5
+ preventing circular import dependencies.
6
+ """
7
+
8
+ from .base_firestore_service import BaseFirestoreService
9
+
10
+ __all__ = [
11
+ 'BaseFirestoreService'
12
+ ]