ipulse-shared-core-ftredge 19.0.1__py3-none-any.whl → 22.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ipulse-shared-core-ftredge has been flagged as potentially problematic.
- ipulse_shared_core_ftredge/cache/shared_cache.py +1 -2
- ipulse_shared_core_ftredge/dependencies/authz_for_apis.py +4 -4
- ipulse_shared_core_ftredge/exceptions/base_exceptions.py +23 -0
- ipulse_shared_core_ftredge/models/__init__.py +3 -7
- ipulse_shared_core_ftredge/models/base_data_model.py +17 -19
- ipulse_shared_core_ftredge/models/catalog/__init__.py +10 -0
- ipulse_shared_core_ftredge/models/catalog/subscriptionplan.py +273 -0
- ipulse_shared_core_ftredge/models/catalog/usertype.py +170 -0
- ipulse_shared_core_ftredge/models/user/__init__.py +5 -0
- ipulse_shared_core_ftredge/models/user/user_permissions.py +66 -0
- ipulse_shared_core_ftredge/models/{subscription.py → user/user_subscription.py} +66 -20
- ipulse_shared_core_ftredge/models/{user_auth.py → user/userauth.py} +19 -10
- ipulse_shared_core_ftredge/models/{user_profile.py → user/userprofile.py} +53 -21
- ipulse_shared_core_ftredge/models/user/userstatus.py +430 -0
- ipulse_shared_core_ftredge/monitoring/__init__.py +2 -2
- ipulse_shared_core_ftredge/monitoring/tracemon.py +320 -0
- ipulse_shared_core_ftredge/services/__init__.py +11 -13
- ipulse_shared_core_ftredge/services/base/__init__.py +3 -1
- ipulse_shared_core_ftredge/services/base/base_firestore_service.py +73 -14
- ipulse_shared_core_ftredge/services/{cache_aware_firestore_service.py → base/cache_aware_firestore_service.py} +46 -32
- ipulse_shared_core_ftredge/services/catalog/__init__.py +14 -0
- ipulse_shared_core_ftredge/services/catalog/catalog_subscriptionplan_service.py +273 -0
- ipulse_shared_core_ftredge/services/catalog/catalog_usertype_service.py +307 -0
- ipulse_shared_core_ftredge/services/charging_processors.py +25 -25
- ipulse_shared_core_ftredge/services/user/__init__.py +5 -25
- ipulse_shared_core_ftredge/services/user/firebase_auth_admin_helpers.py +160 -0
- ipulse_shared_core_ftredge/services/user/user_core_service.py +423 -515
- ipulse_shared_core_ftredge/services/user/user_multistep_operations.py +726 -0
- ipulse_shared_core_ftredge/services/user/user_permissions_operations.py +392 -0
- ipulse_shared_core_ftredge/services/user/user_subscription_operations.py +484 -0
- ipulse_shared_core_ftredge/services/user/userauth_operations.py +928 -0
- ipulse_shared_core_ftredge/services/user/userprofile_operations.py +166 -0
- ipulse_shared_core_ftredge/services/user/userstatus_operations.py +212 -0
- ipulse_shared_core_ftredge/services/{charging_service.py → user_charging_service.py} +9 -9
- {ipulse_shared_core_ftredge-19.0.1.dist-info → ipulse_shared_core_ftredge-22.1.1.dist-info}/METADATA +3 -4
- ipulse_shared_core_ftredge-22.1.1.dist-info/RECORD +51 -0
- ipulse_shared_core_ftredge/models/user_status.py +0 -495
- ipulse_shared_core_ftredge/monitoring/microservmon.py +0 -483
- ipulse_shared_core_ftredge/services/user/iam_management_operations.py +0 -326
- ipulse_shared_core_ftredge/services/user/subscription_management_operations.py +0 -384
- ipulse_shared_core_ftredge/services/user/user_account_operations.py +0 -479
- ipulse_shared_core_ftredge/services/user/user_auth_operations.py +0 -305
- ipulse_shared_core_ftredge/services/user/user_holistic_operations.py +0 -436
- ipulse_shared_core_ftredge-19.0.1.dist-info/RECORD +0 -41
- {ipulse_shared_core_ftredge-19.0.1.dist-info → ipulse_shared_core_ftredge-22.1.1.dist-info}/WHEEL +0 -0
- {ipulse_shared_core_ftredge-19.0.1.dist-info → ipulse_shared_core_ftredge-22.1.1.dist-info}/licenses/LICENCE +0 -0
- {ipulse_shared_core_ftredge-19.0.1.dist-info → ipulse_shared_core_ftredge-22.1.1.dist-info}/top_level.txt +0 -0
ipulse_shared_core_ftredge/monitoring/tracemon.py (new file)
@@ -0,0 +1,320 @@
+"""
+TraceMon - Per-trace monitoring and logging
+==========================================
+
+TraceMon is designed for complete trace isolation. Each request/operation gets
+its own TraceMon instance, eliminating race conditions and providing clean,
+per-trace context management.
+
+Key Design Principles:
+1. One TraceMon per trace (complete isolation)
+2. No shared state between traces
+3. Automatic trace_id inference (no parameter needed)
+4. Thread-safe by design
+5. Collector ID and trace_id are the same (one collector per trace)
+"""
+import uuid
+import time
+from datetime import datetime, timezone
+from typing import Dict, Any, Optional
+from contextlib import contextmanager
+from collections import defaultdict
+from ipulse_shared_base_ftredge import (LogLevel, AbstractResource,
+                                        ProgressStatus, Action,
+                                        Alert, StructLog)
+from ipulse_shared_base_ftredge.status import StatusCounts, map_progress_status_to_log_level, eval_statuses
+
+
+class TraceMon:
+    """
+    TraceMon handles monitoring and logging for a single trace/request.
+
+    Each trace gets its own TraceMon instance, providing complete isolation
+    and eliminating race conditions between concurrent requests.
+    """
+
+    def __init__(self, base_context: str, logger,
+                 max_log_field_len: Optional[int] = 8000,  # by default PipelineLog has 8000 per field length Limit
+                 max_log_dict_byte_size: Optional[float] = 256 * 1024 * 0.80,  # by default PipelineLog dict has 256 * 1024 * 0.80 -80% of 256Kb Limit
+                 exclude_none_from_logs: bool = True):
+        """
+        Initialize TraceMon for a single trace.
+
+        Args:
+            base_context: Base context containing all execution details
+            logger: Logger instance to use
+            max_log_field_len: Maximum length for log field values
+            max_log_dict_byte_size: Maximum byte size for log dictionary
+            exclude_none_from_logs: Whether to exclude None values from logs
+        """
+        # Create ID with timestamp prefix and UUID suffix (same pattern as pipelinemon)
+        timestamp = datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')
+        uuid_suffix = str(uuid.uuid4())[:8]  # Take first 8 chars of UUID
+        self._id = f"{timestamp}_{uuid_suffix}"
+
+        # Core configuration
+        self._logger = logger
+        self._base_context = base_context
+
+        # Logging configuration
+        self._max_log_field_len = max_log_field_len
+        self._max_log_dict_byte_size = max_log_dict_byte_size
+        self._exclude_none_from_logs = exclude_none_from_logs
+
+        # Trace timing
+        self._start_time = time.time()
+        self._end_time: Optional[float] = None
+
+        # Context stack (isolated per trace)
+        self._context_stack = []
+
+        # Trace metrics
+        self._metrics = {
+            "trace_id": self._id,  # In TraceMon, collector_id and trace_id are the same
+            "start_time": self._start_time,
+            "status": str(ProgressStatus.IN_PROGRESS),
+            "by_event_count": defaultdict(int),
+            "by_level_code_count": defaultdict(int),
+            "status_counts": StatusCounts()
+        }
+
+        # Add initial status
+        self._metrics["status_counts"].add_status(ProgressStatus.STARTED)
+
+        # Log trace start
+        self._log_trace_start()
+
+    @property
+    def id(self) -> str:
+        """Get the unique ID for this TraceMon (both collector_id and trace_id)."""
+        return self._id
+
+    @property
+    def trace_id(self) -> str:
+        """Get the trace ID for this trace (same as collector_id)."""
+        return self._id
+
+    @property
+    def collector_id(self) -> str:
+        """Get the collector ID for this TraceMon (same as trace_id)."""
+        return self._id
+
+    @property
+    def base_context(self) -> str:
+        """Get the base context."""
+        return self._base_context
+
+    @property
+    def current_context(self) -> str:
+        """Get the current context stack as a string."""
+        if not self._context_stack:
+            return self._id
+        return f"{self._id} >> " + " >> ".join(self._context_stack)
+
+    @property
+    def elapsed_ms(self) -> int:
+        """Get elapsed time in milliseconds since trace start."""
+        return int((time.time() - self._start_time) * 1000)
+
+    @property
+    def duration_ms(self) -> Optional[int]:
+        """Get total duration in milliseconds (only available after end)."""
+        if self._end_time is None:
+            return None
+        return int((self._end_time - self._start_time) * 1000)
+
+    @property
+    def metrics(self) -> Dict[str, Any]:
+        """Get current trace metrics."""
+        metrics = self._metrics.copy()
+        metrics["by_event_count"] = dict(metrics["by_event_count"])
+        metrics["by_level_code_count"] = dict(metrics["by_level_code_count"])
+        metrics["elapsed_ms"] = self.elapsed_ms
+        if self.duration_ms is not None:
+            metrics["duration_ms"] = self.duration_ms
+        return metrics
+
+    @property
+    def is_active(self) -> bool:
+        """Check if trace is still active (not ended)."""
+        return self._end_time is None
+
+    def _log_trace_start(self):
+        """Log the trace start event."""
+        start_log = StructLog(
+            level=LogLevel.INFO,
+            description=f"Starting trace {self._id}",
+            resource=AbstractResource.TRACEMON,
+            action=Action.EXECUTE,
+            progress_status=ProgressStatus.STARTED,
+        )
+        self.log(start_log)
+
+    @contextmanager
+    def context(self, context_name: str):
+        """
+        Context manager for tracking execution context within this trace.
+
+        Args:
+            context_name: The name of the execution context
+        """
+        self.push_context(context_name)
+        try:
+            yield
+        finally:
+            self.pop_context()
+
+    def push_context(self, context: str):
+        """Add a context level to the stack."""
+        self._context_stack.append(context)
+
+    def pop_context(self):
+        """Remove the most recent context from the stack."""
+        if self._context_stack:
+            return self._context_stack.pop()
+
+    def log(self, log: StructLog) -> None:
+        """
+        Log a StructLog message with this trace's context.
+
+        Args:
+            log: StructLog instance to log
+        """
+        if not self.is_active:
+            # Still allow logging but add a warning note
+            existing_note = log.note or ""
+            warning_note = "TRACE_ENDED"
+            log.note = f"{existing_note}; {warning_note}" if existing_note else warning_note
+
+        # Set trace-specific context
+        log.trace_id = self._id
+        log.collector_id = self._id
+        log.base_context = self._base_context
+        log.context = self.current_context
+
+        # Add elapsed time
+        existing_note = log.note or ""
+        elapsed_note = f"elapsed_ms: {self.elapsed_ms}"
+        log.note = f"{existing_note}; {elapsed_note}" if existing_note else elapsed_note
+
+        # Update trace metrics
+        self._update_counts(log)
+
+        # Write to logger
+        self._write_log_to_logger(log)
+
+    def _update_counts(self, log: StructLog):
+        """Update trace metrics based on the log event."""
+        event_tuple = log.getEvent()
+        level = log.level
+
+        self._metrics["by_event_count"][event_tuple] += 1
+        self._metrics["by_level_code_count"][level.value] += 1
+
+    def _get_error_count(self) -> int:
+        """Get total error count (ERROR + CRITICAL)."""
+        return (self._metrics["by_level_code_count"].get(LogLevel.ERROR.value, 0) +
+                self._metrics["by_level_code_count"].get(LogLevel.CRITICAL.value, 0))
+
+    def _get_warning_count(self) -> int:
+        """Get warning count."""
+        return self._metrics["by_level_code_count"].get(LogLevel.WARNING.value, 0)
+
+    def _get_notice_count(self) -> int:
+        """Get notice count."""
+        return self._metrics["by_level_code_count"].get(LogLevel.NOTICE.value, 0)
+
+    def end(self, force_status: Optional[ProgressStatus] = None, issues_allowed: bool = True) -> Dict[str, Any]:
+        """
+        End this trace and return final metrics.
+
+        Args:
+            force_status: Optional status to force, overriding automatic calculation
+            issues_allowed: Whether issues (errors) are allowed for this trace
+
+        Returns:
+            Dict containing final trace metrics
+        """
+        if not self.is_active:
+            # Already ended, return existing metrics
+            return self.metrics
+
+        self._end_time = time.time()
+
+        # Calculate final status
+        error_count = self._get_error_count()
+        warning_count = self._get_warning_count()
+        notice_count = self._get_notice_count()
+
+        if force_status is not None:
+            final_status = force_status
+            level = map_progress_status_to_log_level(final_status)
+        else:
+            # Simple status calculation based on error/warning/notice counts
+            if error_count > 0:
+                if issues_allowed:
+                    final_status = ProgressStatus.FINISHED_WITH_ISSUES
+                else:
+                    final_status = ProgressStatus.FAILED
+            elif warning_count > 0:
+                final_status = ProgressStatus.DONE_WITH_WARNINGS
+            elif notice_count > 0:
+                final_status = ProgressStatus.DONE_WITH_NOTICES
+            else:
+                final_status = ProgressStatus.DONE
+
+            level = map_progress_status_to_log_level(final_status)
+
+        # Update trace status
+        self._metrics["status"] = str(final_status)
+        self._metrics["status_counts"].add_status(final_status)
+
+        # Log completion
+        status_source = "FORCED" if force_status is not None else "AUTO"
+        summary_msg = (
+            f"Trace {self._id} completed with status {str(final_status)} ({status_source}). "
+            f"Duration: {self.duration_ms}ms. "
+            f"Errors: {error_count}, Warnings: {warning_count}, Notices: {notice_count}"
+        )
+
+        completion_log = StructLog(
+            level=level,
+            description=summary_msg,
+            resource=AbstractResource.TRACEMON,
+            action=Action.EXECUTE,
+            progress_status=final_status
+        )
+        self.log(completion_log)
+
+        return self.metrics
+
+    def get_readable_level_counts(self) -> Dict[str, int]:
+        """Get readable level counts for this trace."""
+        readable_counts = {}
+
+        for level_code, count in self._metrics["by_level_code_count"].items():
+            if count > 0:
+                for level in LogLevel:
+                    if level.value == level_code:
+                        readable_counts[str(level)] = count
+                        break
+
+        return readable_counts
+
+    def _write_log_to_logger(self, log: StructLog):
+        """Write structured log to the logger."""
+        log_dict = log.to_dict(
+            max_field_len=self._max_log_field_len,
+            byte_size_limit=self._max_log_dict_byte_size,
+            exclude_none=self._exclude_none_from_logs
+        )
+
+        # Write to logger based on level
+        if log.level.value >= LogLevel.ERROR.value:
+            self._logger.error(log_dict)
+        elif log.level.value >= LogLevel.WARNING.value:
+            self._logger.warning(log_dict)
+        elif log.level.value >= LogLevel.INFO.value:
+            self._logger.info(log_dict)
+        else:
+            self._logger.debug(log_dict)
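For orientation, here is a minimal usage sketch of the new TraceMon class, derived only from the API visible in the hunk above. The re-export of TraceMon from ipulse_shared_core_ftredge.monitoring and the logger name are assumptions; the StructLog fields mirror the calls TraceMon itself makes.

```python
import logging

from ipulse_shared_base_ftredge import (LogLevel, AbstractResource, Action,
                                        ProgressStatus, StructLog)
from ipulse_shared_core_ftredge.monitoring import TraceMon  # assumed re-export

logger = logging.getLogger("api")  # hypothetical logger name

# One TraceMon per request: the ID (trace_id == collector_id) is generated internally.
monitor = TraceMon(base_context="user-service:create_user", logger=logger)

with monitor.context("validate_payload"):
    monitor.log(StructLog(
        level=LogLevel.INFO,
        description="Payload validated",
        resource=AbstractResource.TRACEMON,  # placeholder resource for this sketch
        action=Action.EXECUTE,
        progress_status=ProgressStatus.DONE,
    ))

# end() derives the final status from error/warning/notice counts and returns metrics.
final_metrics = monitor.end()
```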
ipulse_shared_core_ftredge/services/__init__.py
@@ -2,24 +2,22 @@
 
 
 # Import from base services
-from .base import BaseFirestoreService
-from .cache_aware_firestore_service import CacheAwareFirestoreService
+from .base import BaseFirestoreService, CacheAwareFirestoreService
 
 from .charging_processors import ChargingProcessor
-from .
+from .user_charging_service import UserChargingService
 
 # Import user services from the user package
 from .user import (
     UserCoreService,
-
-
-
+    UserauthOperations,
+    UserpermissionsOperations,
+    UsersubscriptionOperations,
+    UsermultistepOperations,
 )
 
-
-
-
-
-
-    'SubscriptionPlanDocument', 'UserTypeDefaultsDocument'
-]
+# Import catalog services
+from .catalog import (
+    CatalogSubscriptionPlanService,
+    CatalogUserTypeService,
+)
ipulse_shared_core_ftredge/services/base/__init__.py
@@ -6,7 +6,9 @@ preventing circular import dependencies.
 """
 
 from .base_firestore_service import BaseFirestoreService
+from .cache_aware_firestore_service import CacheAwareFirestoreService
 
 __all__ = [
-    'BaseFirestoreService'
+    'BaseFirestoreService',
+    'CacheAwareFirestoreService'
 ]
ipulse_shared_core_ftredge/services/base/base_firestore_service.py
@@ -7,7 +7,7 @@ This provides the foundation for all Firestore-based services.
 
 import json
 import logging
-from datetime import datetime, timezone
+from datetime import datetime, timezone, date
 from typing import Any, AsyncGenerator, Dict, Generic, List, Optional, TypeVar, Type, Union
 
 from google.cloud import firestore
@@ -20,6 +20,32 @@ from ...exceptions import ResourceNotFoundError, ServiceError, ValidationError a
 T = TypeVar('T', bound=BaseModel)
 
 
+def _sanitize_firestore_data(data: Any) -> Any:
+    """
+    Recursively sanitize data before sending to Firestore.
+    Converts datetime.date objects to datetime.datetime objects since Firestore
+    only supports datetime.datetime, not datetime.date.
+    """
+    if isinstance(data, date) and not isinstance(data, datetime):
+        # Convert date to datetime (start of day in UTC)
+        return datetime.combine(data, datetime.min.time()).replace(tzinfo=timezone.utc)
+
+    if isinstance(data, BaseModel):
+        # Convert Pydantic model to dict and sanitize recursively
+        return _sanitize_firestore_data(data.model_dump())
+
+    if isinstance(data, dict):
+        # Recurse into dictionaries
+        return {k: _sanitize_firestore_data(v) for k, v in data.items()}
+
+    if isinstance(data, list):
+        # Recurse into lists
+        return [_sanitize_firestore_data(item) for item in data]
+
+    # Return everything else as-is (str, int, float, bool, datetime, etc.)
+    return data
+
+
 class BaseFirestoreService(Generic[T]):
     """
     Base service class for Firestore operations using Pydantic models
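A short sketch of what the new `_sanitize_firestore_data` helper does to a nested payload before a write; the payload values are hypothetical.

```python
from datetime import date, datetime, timezone

# Hypothetical payload mixing a bare date, a nested dict, and a list
payload = {
    "dob": date(1990, 5, 1),
    "profile": {"renewal_dates": [date(2026, 1, 1)]},
    "name": "alice",
}

clean = _sanitize_firestore_data(payload)
# clean["dob"] == datetime(1990, 5, 1, 0, 0, tzinfo=timezone.utc)
# clean["profile"]["renewal_dates"][0] is likewise promoted to a UTC datetime
# clean["name"] passes through unchanged
```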
@@ -95,7 +121,7 @@ class BaseFirestoreService(Generic[T]):
                 additional_info={"validation_errors": e.errors()}
             )
 
-    async def get_document(self, doc_id: str, convert_to_model: bool = True) -> Union[Dict[str, Any]
+    async def get_document(self, doc_id: str, convert_to_model: bool = True) -> Union[T, Dict[str, Any]]:
         """
         Get a document by ID
 
@@ -104,7 +130,7 @@ class BaseFirestoreService(Generic[T]):
             convert_to_model: Whether to convert to Pydantic model
 
         Returns:
-            Document as
+            Document as a model instance or dict.
 
         Raises:
             ResourceNotFoundError: If document doesn't exist
@@ -121,7 +147,21 @@ class BaseFirestoreService(Generic[T]):
                 )
 
             doc_dict = doc.to_dict()
-
+            if not doc_dict:
+                # This case should ideally not be reached if doc.exists is true,
+                # but as a safeguard:
+                raise ServiceError(
+                    operation="get_document",
+                    error=ValueError("Document exists but data is empty."),
+                    resource_type=self.resource_type,
+                    resource_id=doc_id
+                )
+
+            if convert_to_model and self.model_class:
+                return self._convert_to_model(doc_dict, doc_id)
+            else:
+                doc_dict['id'] = doc_id
+                return doc_dict
 
         except ResourceNotFoundError:
             raise
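A hedged sketch of calling the repaired `get_document` in both modes, from inside an async function. The constructor arguments and the `UserProfile` model are assumptions, since the diff does not show `__init__`.

```python
from google.cloud import firestore

# Hypothetical service instance; the constructor signature is an assumption.
service = BaseFirestoreService[UserProfile](
    db=firestore.Client(),
    collection_name="users",
    resource_type="userprofile",
    model_class=UserProfile,
)

user = await service.get_document("uid_123")                          # -> UserProfile
raw = await service.get_document("uid_123", convert_to_model=False)   # -> dict with 'id' injected
```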
@@ -162,6 +202,9 @@ class BaseFirestoreService(Generic[T]):
             else:
                 doc_dict = data.copy()
 
+            # Sanitize data for Firestore (convert date objects to datetime)
+            doc_dict = _sanitize_firestore_data(doc_dict)
+
             # Ensure ID is set correctly
             doc_dict['id'] = doc_id
 
@@ -234,6 +277,10 @@ class BaseFirestoreService(Generic[T]):
 
         # Add update timestamp and user
         updates = update_data.copy()
+
+        # Sanitize data for Firestore (convert date objects to datetime)
+        updates = _sanitize_firestore_data(updates)
+
         updates['updated_at'] = datetime.now(timezone.utc)
         updates['updated_by'] = updater_uid
 
@@ -310,7 +357,7 @@ class BaseFirestoreService(Generic[T]):
         """
         try:
             doc_ref = self._get_collection().document(doc_id)
-            doc = await doc_ref.get()
+            doc = doc_ref.get()  # Remove await - this is synchronous
             return doc.exists
         except Exception as e:
             raise ServiceError(
@@ -325,8 +372,9 @@ class BaseFirestoreService(Generic[T]):
         limit: Optional[int] = None,
         start_after: Optional[str] = None,
         order_by: Optional[str] = None,
-        filters: Optional[List[FieldFilter]] = None
-
+        filters: Optional[List[FieldFilter]] = None,
+        as_models: bool = True
+    ) -> Union[List[T], List[Dict[str, Any]]]:
         """
         List documents with optional filtering and pagination
 
@@ -335,9 +383,10 @@ class BaseFirestoreService(Generic[T]):
             start_after: Document ID to start after for pagination
             order_by: Field to order by
             filters: List of field filters
+            as_models: Whether to convert documents to Pydantic models
 
         Returns:
-            List of documents as model instances
+            List of documents as model instances or dicts
 
         Raises:
             ServiceError: If an error occurs during listing
@@ -348,7 +397,8 @@ class BaseFirestoreService(Generic[T]):
             # Apply filters
             if filters:
                 for filter_condition in filters:
-
+                    field, operator, value = filter_condition
+                    query = query.where(field, operator, value)
 
             # Apply ordering
             if order_by:
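Worth noting in this hunk: the rebuilt loop unpacks each filter as a `(field, operator, value)` tuple, so tuple-style filters are what `list_documents` actually consumes despite the `List[FieldFilter]` annotation. A usage sketch, reusing the hypothetical service from above:

```python
# Tuple-style filters matching the (field, operator, value) unpacking in the loop.
active_admins = await service.list_documents(
    filters=[("status", "==", "active"), ("role", "==", "admin")],
    order_by="created_at",
    limit=50,
    as_models=True,  # set False to get raw dicts instead of Pydantic models
)
```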
@@ -373,7 +423,7 @@ class BaseFirestoreService(Generic[T]):
                 if doc_dict is None:
                     continue  # Skip documents that don't exist
 
-                if self.model_class:
+                if as_models and self.model_class:
                     model_instance = self._convert_to_model(doc_dict, doc.id)
                     results.append(model_instance)
                 else:
@@ -412,20 +462,27 @@ class BaseFirestoreService(Generic[T]):
             ServiceError: If an error occurs during archival
         """
         try:
+            # Generate unique archive document ID to handle duplicates
+            archive_timestamp = datetime.now(timezone.utc)
+            timestamp_str = archive_timestamp.strftime("%Y%m%d_%H%M%S_%f")[:-3]  # microseconds to milliseconds
+            unique_archive_doc_id = f"{doc_id}_{timestamp_str}"
+
             # Add archival metadata
             archive_data = document_data.copy()
             archive_data.update({
-                "archived_at":
+                "archived_at": archive_timestamp,
                 "archived_by": archived_by,
+                "updated_at": archive_timestamp,
+                "updated_by": archived_by,
                 "original_collection": self.collection_name,
                 "original_doc_id": doc_id
             })
 
-            # Store in archive collection
-            archive_ref = self.db.collection(archive_collection).document(
+            # Store in archive collection with unique ID
+            archive_ref = self.db.collection(archive_collection).document(unique_archive_doc_id)
             archive_ref.set(archive_data, timeout=self.timeout)
 
-            self.logger.info(f"Successfully archived {self.resource_type} {doc_id} to {archive_collection}")
+            self.logger.info(f"Successfully archived {self.resource_type} {doc_id} to {archive_collection} as {unique_archive_doc_id}")
             return True
 
         except Exception as e:
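The unique archive ID appends a millisecond-precision UTC timestamp to the original document ID; a quick illustration of the resulting format, with a hypothetical doc ID and fixed timestamp:

```python
from datetime import datetime, timezone

doc_id = "uid_123"  # hypothetical
archive_timestamp = datetime(2025, 3, 14, 9, 26, 53, 589793, tzinfo=timezone.utc)
timestamp_str = archive_timestamp.strftime("%Y%m%d_%H%M%S_%f")[:-3]  # trim microseconds to ms
print(f"{doc_id}_{timestamp_str}")  # -> uid_123_20250314_092653_589
```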
@@ -518,3 +575,5 @@ class BaseFirestoreService(Generic[T]):
                 resource_type=self.resource_type,
                 resource_id=doc_id
             )
+
+