ipulse-shared-core-ftredge 11.1.1-py3-none-any.whl → 13.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ipulse-shared-core-ftredge might be problematic.
- ipulse_shared_core_ftredge/__init__.py +1 -1
- ipulse_shared_core_ftredge/cache/__init__.py +4 -0
- ipulse_shared_core_ftredge/cache/shared_cache.py +249 -0
- ipulse_shared_core_ftredge/dependencies/authz_for_apis.py +123 -21
- ipulse_shared_core_ftredge/models/base_data_model.py +21 -7
- ipulse_shared_core_ftredge/models/user_status.py +4 -0
- ipulse_shared_core_ftredge/services/__init__.py +13 -5
- ipulse_shared_core_ftredge/services/base_firestore_service.py +95 -17
- ipulse_shared_core_ftredge/services/cache_aware_firestore_service.py +171 -0
- ipulse_shared_core_ftredge/services/credit_service.py +270 -0
- {ipulse_shared_core_ftredge-11.1.1.dist-info → ipulse_shared_core_ftredge-13.0.1.dist-info}/METADATA +2 -2
- {ipulse_shared_core_ftredge-11.1.1.dist-info → ipulse_shared_core_ftredge-13.0.1.dist-info}/RECORD +15 -11
- {ipulse_shared_core_ftredge-11.1.1.dist-info → ipulse_shared_core_ftredge-13.0.1.dist-info}/WHEEL +1 -1
- {ipulse_shared_core_ftredge-11.1.1.dist-info → ipulse_shared_core_ftredge-13.0.1.dist-info}/licenses/LICENCE +0 -0
- {ipulse_shared_core_ftredge-11.1.1.dist-info → ipulse_shared_core_ftredge-13.0.1.dist-info}/top_level.txt +0 -0
ipulse_shared_core_ftredge/__init__.py
@@ -7,6 +7,6 @@ from .models import ( UserAuth, UserProfile,Subscription,
 from .services import (BaseFirestoreService,BaseServiceException, ResourceNotFoundError, AuthorizationError,
-                        ValidationError)
+                        ValidationError, ServiceError)
 from .utils import (EnsureJSONEncoderCompatibility)
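With this change, ServiceError joins the exceptions re-exported at the package root. A one-line consumer sketch (the try body is illustrative):

from ipulse_shared_core_ftredge import ServiceError

try:
    ...  # any service call from this package
except ServiceError as exc:
    print(f"service operation failed: {exc}")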
ipulse_shared_core_ftredge/cache/shared_cache.py
@@ -0,0 +1,249 @@
+"""Module for shared caching functionality that can be used across microservices."""
+import os
+import time
+import logging
+import traceback
+import inspect
+import asyncio
+from typing import Dict, Any, Optional, TypeVar, Generic, Callable, Tuple, List, Awaitable
+
+T = TypeVar('T')
+
+class SharedCache(Generic[T]):
+    """
+    Generic shared cache implementation that can be used across services.
+
+    Attributes:
+        name: The name of the cache for logging and identification.
+        ttl: Time-to-live in seconds for cached items.
+        enabled: Whether the cache is enabled.
+        logger: Logger for cache operations.
+        _cache: Dictionary holding cached values.
+        _timestamps: Dictionary holding timestamps for each cached item.
+    """
+
+    def __init__(
+        self,
+        name: str,
+        ttl: float,
+        enabled: bool = True,
+        logger: Optional[logging.Logger] = None
+    ):
+        """Initialize the cache with name, TTL and enabled state."""
+        self.name = name
+        self.ttl = ttl
+        self.enabled = enabled
+        self.logger = logger or logging.getLogger(__name__)
+        self._cache: Dict[str, T] = {}
+        self._timestamps: Dict[str, float] = {}
+
+        self.logger.info(f"{name} cache initialized. Enabled: {enabled}, TTL: {ttl} seconds")
+
+    def get(self, key: str) -> Optional[T]:
+        """
+        Get a value from the cache if it exists and hasn't expired.
+
+        Args:
+            key: The cache key to retrieve.
+
+        Returns:
+            The cached value if found and valid, None otherwise.
+        """
+        if not self.enabled:
+            return None
+
+        try:
+            if key in self._cache:
+                timestamp = self._timestamps.get(key, 0)
+                if time.time() - timestamp < self.ttl:
+                    self.logger.debug(f"Cache hit for {key} in {self.name}")
+                    return self._cache[key]
+                else:
+                    # Expired item, remove it
+                    self.invalidate(key)
+                    self.logger.debug(f"Cache expired for {key} in {self.name}")
+        except Exception as e:
+            self.logger.error(f"Error getting item from {self.name} cache with key {key}: {str(e)}")
+            self.logger.error(traceback.format_exc())
+
+        return None
+
+    def set(self, key: str, value: T) -> None:
+        """
+        Set a value in the cache.
+
+        Args:
+            key: The cache key to set.
+            value: The value to cache.
+        """
+        if not self.enabled:
+            return
+
+        try:
+            self._cache[key] = value
+            self._timestamps[key] = time.time()
+            self.logger.debug(f"Cached item {key} in {self.name}")
+        except Exception as e:
+            self.logger.error(f"Error setting item in {self.name} cache with key {key}: {str(e)}")
+            self.logger.error(traceback.format_exc())
+
+    def invalidate(self, key: str) -> None:
+        """
+        Remove a specific key from the cache.
+
+        Args:
+            key: The cache key to invalidate.
+        """
+        try:
+            self._cache.pop(key, None)
+            self._timestamps.pop(key, None)
+            self.logger.debug(f"Invalidated cache for {key} in {self.name}")
+        except Exception as e:
+            self.logger.error(f"Error invalidating cache in {self.name} for key {key}: {str(e)}")
+            self.logger.error(traceback.format_exc())
+
+    def invalidate_all(self) -> None:
+        """Clear all cached items."""
+        try:
+            cache_size = len(self._cache)
+            self._cache.clear()
+            self._timestamps.clear()
+            self.logger.info(f"Invalidated all {cache_size} entries in {self.name} cache")
+        except Exception as e:
+            self.logger.error(f"Error invalidating all cache entries in {self.name}: {str(e)}")
+            self.logger.error(traceback.format_exc())
+
+    def get_or_set(
+        self,
+        key: str,
+        data_loader: Callable[[], T]
+    ) -> Tuple[T, bool]:
+        """
+        Get a value from cache or set it using the data_loader if missing or expired.
+
+        Args:
+            key: The cache key.
+            data_loader: Function to load data if not in cache.
+
+        Returns:
+            Tuple of (data, was_cached) where was_cached indicates if from cache.
+        """
+        try:
+            cached_data = self.get(key)
+            if cached_data is not None:
+                return cached_data, True
+
+            # Not in cache or expired, load the data
+            self.logger.debug(f"Cache miss for {key} in {self.name}, loading data...")
+
+            # Check if the data_loader is a coroutine function
+            if inspect.iscoroutinefunction(data_loader):
+                self.logger.error(
+                    f"Error in get_or_set for {key} in {self.name}: "
+                    f"data_loader is a coroutine function which is not supported. "
+                    f"Use a regular function that returns a value, not a coroutine."
+                )
+                # Fall back to running the coroutine in the event loop if possible
+                try:
+                    loop = asyncio.get_event_loop()
+                    fresh_data = loop.run_until_complete(data_loader())
+                except Exception as coro_err:
+                    self.logger.error(f"Failed to execute coroutine data_loader: {str(coro_err)}")
+                    raise RuntimeError(f"Cannot use coroutine data_loader in cache: {str(coro_err)}")
+            else:
+                # Regular function, just call it
+                fresh_data = data_loader()
+
+            if fresh_data is not None:  # Only cache if we got valid data
+                self.set(key, fresh_data)
+
+            if fresh_data is None:
+                raise ValueError(f"Data loader returned None for key {key} in {self.name}")
+            return fresh_data, False
+        except Exception as e:
+            self.logger.error(f"Error in get_or_set for {key} in {self.name}: {str(e)}")
+            self.logger.error(traceback.format_exc())
+
+            # Since this is a critical function, re-raise the exception
+            # after logging it, but add context about the cache
+            raise RuntimeError(f"Cache error in {self.name} for key {key}: {str(e)}") from e
+
+    async def async_get_or_set(
+        self,
+        key: str,
+        async_data_loader: Callable[[], Awaitable[T]]
+    ) -> Tuple[T, bool]:
+        """
+        Async version of get_or_set for use with async data loaders.
+
+        Args:
+            key: The cache key.
+            async_data_loader: Async function to load data if not in cache.
+
+        Returns:
+            Tuple of (data, was_cached) where was_cached indicates if from cache.
+        """
+        try:
+            cached_data = self.get(key)
+            if cached_data is not None:
+                return cached_data, True
+
+            # Not in cache or expired, load the data asynchronously
+            self.logger.debug(f"Cache miss for {key} in {self.name}, loading data asynchronously...")
+
+            # Execute the async data loader
+            fresh_data = await async_data_loader()
+
+            if fresh_data is not None:  # Only cache if we got valid data
+                self.set(key, fresh_data)
+
+            return fresh_data, False
+        except Exception as e:
+            self.logger.error(f"Error in async_get_or_set for {key} in {self.name}: {str(e)}")
+            self.logger.error(traceback.format_exc())
+            raise RuntimeError(f"Cache error in {self.name} for key {key}: {str(e)}") from e
+
+    def get_stats(self) -> Dict[str, Any]:
+        """Get statistics about the current cache state."""
+        try:
+            return {
+                "name": self.name,
+                "enabled": self.enabled,
+                "ttl_seconds": self.ttl,
+                "item_count": len(self._cache),
+                "keys": list(self._cache.keys())[:20],  # Limit to first 20 keys
+                "has_more_keys": len(self._cache.keys()) > 20,
+                "memory_usage_estimate_bytes": sum(
+                    len(str(k)) + self._estimate_size(v)
+                    for k, v in self._cache.items()
+                )
+            }
+        except Exception as e:
+            self.logger.error(f"Error getting stats for {self.name} cache: {str(e)}")
+            self.logger.error(traceback.format_exc())
+            return {
+                "name": self.name,
+                "enabled": self.enabled,
+                "error": str(e),
+                "ttl_seconds": self.ttl,
+                "item_count": len(self._cache) if self._cache else 0
+            }
+
+    def _estimate_size(self, obj: Any) -> int:
+        """Estimate the memory size of an object in bytes."""
+        try:
+            if obj is None:
+                return 0
+            if isinstance(obj, (str, bytes, bytearray)):
+                return len(obj)
+            if isinstance(obj, (int, float, bool)):
+                return 8
+            if isinstance(obj, dict):
+                return sum(len(str(k)) + self._estimate_size(v) for k, v in obj.items())
+            if isinstance(obj, (list, tuple, set)):
+                return sum(self._estimate_size(i) for i in obj)
+            # For other objects, use a rough approximation
+            return len(str(obj))
+        except Exception:
+            # If we can't estimate, return a reasonable default
+            return 100
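Both lookup paths return a (value, was_cached) tuple and raise RuntimeError if the loader fails or returns None. A minimal usage sketch; the cache name, TTL, and loader bodies are illustrative:

import asyncio
from ipulse_shared_core_ftredge.cache.shared_cache import SharedCache

prices: SharedCache[dict] = SharedCache(name="prices", ttl=30.0)

def load_prices() -> dict:
    return {"AAPL": 212.3}  # stands in for a real datastore read

data, was_cached = prices.get_or_set("prices:latest", load_prices)

async def refresh() -> None:
    async def load_prices_async() -> dict:
        return {"AAPL": 212.3}
    # Coroutine loaders must go through async_get_or_set; get_or_set only logs
    # an error and falls back to running them on the loop, which is best avoided.
    data, was_cached = await prices.async_get_or_set("prices:latest", load_prices_async)

asyncio.run(refresh())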
ipulse_shared_core_ftredge/dependencies/authz_for_apis.py
@@ -1,5 +1,8 @@
 import os
 import logging
+import json
+import asyncio
+from concurrent.futures import ThreadPoolExecutor
 from typing import Optional, Iterable, Dict, Any, List
 from datetime import datetime, timedelta, timezone
 import json
@@ -10,10 +13,10 @@ from ipulse_shared_core_ftredge.services import ServiceError, AuthorizationError
 from ipulse_shared_core_ftredge.models import UserStatus
 from ipulse_shared_core_ftredge.utils.json_encoder import convert_to_json_serializable

-# Constants
-USERS_STATUS_COLLECTION_NAME = UserStatus.
-USERS_STATUS_DOC_REF = "
-
+# Constants derived from UserStatus model
+USERS_STATUS_COLLECTION_NAME = UserStatus.COLLECTION_NAME
+USERS_STATUS_DOC_REF = f"{UserStatus.OBJ_REF}_"  # Use OBJ_REF and append underscore
+USERSTATUS_CACHE_TTL = 60  # 60 seconds

 class UserStatusCache:
     """Manages user status caching with dynamic invalidation"""
@@ -33,7 +36,7 @@ class UserStatusCache:
         status_data = self._cache[user_uid]
         # Force refresh for credit-consuming or sensitive operations
         # Check TTL for normal operations
-        if datetime.now() - self._timestamps[user_uid] < timedelta(seconds=
+        if datetime.now() - self._timestamps[user_uid] < timedelta(seconds=USERSTATUS_CACHE_TTL):
             return status_data
         self.invalidate(user_uid)
         return None
@@ -65,13 +68,59 @@ userstatus_cache = UserStatusCache()
 # Replace the logger dependency with a standard logger
 logger = logging.getLogger(__name__)

+# Create a custom FirestoreTimeoutError class that can be identified in middlewares
+class FirestoreTimeoutError(TimeoutError):
+    """Custom exception for Firestore timeout errors to make them more identifiable."""
+    pass
+
+
+# Define a function to get a Firestore document with a strict timeout
+async def get_with_strict_timeout(doc_ref, timeout_seconds: float):
+    """
+    Get a Firestore document with a strictly enforced timeout.
+
+    Args:
+        doc_ref: Firestore document reference
+        timeout_seconds: Maximum time to wait in seconds
+
+    Returns:
+        Document snapshot
+
+    Raises:
+        FirestoreTimeoutError: If the operation takes longer than timeout_seconds
+    """
+    loop = asyncio.get_running_loop()
+    with ThreadPoolExecutor() as executor:
+        try:
+            # Run the blocking Firestore get() operation in a thread and apply a strict timeout
+            logger.debug(f"Starting Firestore get with strict timeout of {timeout_seconds}s")
+            return await asyncio.wait_for(
+                loop.run_in_executor(executor, doc_ref.get),
+                timeout=timeout_seconds
+            )
+        except asyncio.TimeoutError:
+            error_message = f"User Status fetching for Authz timed out after {timeout_seconds} seconds, perhaps issue with Firestore Connectivity"
+            logger.error(error_message)
+            raise FirestoreTimeoutError(error_message)
+
+# Update get_userstatus to use our new strict timeout function
 async def get_userstatus(
     user_uid: str,
-    db: firestore.Client,
-    force_fresh: bool = False
+    db: firestore.Client,
+    force_fresh: bool = False,
+    timeout: float = 12.0  # Default timeout but allow override
 ) -> tuple[Dict[str, Any], bool]:
     """
-    Fetch user status with intelligent caching
+    Fetch user status with intelligent caching and configurable timeout
+
+    Args:
+        user_uid: User ID to fetch status for
+        db: Firestore client
+        force_fresh: Whether to bypass cache
+        timeout: Timeout for Firestore operations in seconds
+
+    Returns:
+        Tuple of (user status data, whether cache was used)
     """
     cache_used = False
     if not force_fresh:
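The wrapper exists because the synchronous Firestore get() blocks, so an async caller cannot interrupt it directly; pushing it onto a worker thread lets asyncio.wait_for enforce a wall-clock deadline from the event loop. A minimal standalone sketch of the same pattern using only the standard library (the slow function stands in for the blocking Firestore call):

import asyncio
import time
from concurrent.futures import ThreadPoolExecutor

def slow_blocking_call() -> str:
    time.sleep(5)  # stands in for a blocking doc_ref.get()
    return "done"

async def call_with_strict_timeout(timeout_seconds: float) -> str:
    loop = asyncio.get_running_loop()
    with ThreadPoolExecutor() as executor:
        # wait_for unblocks the awaiting coroutine once the deadline passes
        return await asyncio.wait_for(
            loop.run_in_executor(executor, slow_blocking_call),
            timeout=timeout_seconds,
        )

# asyncio.run(call_with_strict_timeout(1.0)) raises asyncio.TimeoutError after ~1s

One caveat inherent to the pattern: the worker thread is not interrupted, so the blocking call still runs to completion in the background; only the caller is unblocked.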
@@ -85,8 +134,11 @@ async def get_userstatus(
         userstatus_id = USERS_STATUS_DOC_REF + user_uid
         user_ref = db.collection(USERS_STATUS_COLLECTION_NAME).document(userstatus_id)

-
-
+        logger.debug(f"Fetching user status for {user_uid} with strict timeout {timeout}s")
+
+        # Use our strict timeout wrapper instead of the native timeout parameter
+        snapshot = await get_with_strict_timeout(user_ref, timeout)
+
         if not snapshot.exists:
             raise ResourceNotFoundError(
                 resource_type="authorization userstatus",
@@ -101,9 +153,23 @@ async def get_userstatus(
         userstatus_cache.set(user_uid, status_data)
         return status_data, cache_used

+    except TimeoutError as e:
+        logger.error(f"Timeout while fetching user status for {user_uid}: {str(e)}")
+        raise ServiceError(
+            operation="fetching user status for authz",
+            error=e,
+            resource_type="userstatus",
+            resource_id=user_uid,
+            additional_info={
+                "force_fresh": force_fresh,
+                "collection": USERS_STATUS_COLLECTION_NAME,
+                "timeout_seconds": timeout
+            }
+        )
     except ResourceNotFoundError:
         raise
     except Exception as e:
+        logger.error(f"Error fetching user status for {user_uid}: {str(e)}")
         raise ServiceError(
             operation=f"fetching user status",
             error=e,
@@ -156,15 +222,27 @@ async def extract_request_fields(request: Request) -> Optional[List[str]]:
         logger.warning(f"Could not extract fields from request body: {str(e)}")
         return None  # Return None instead of raising an error

+# Main authorization function with configurable timeout
 async def authorizeAPIRequest(
     request: Request,
-    db: firestore.Client,
+    db: firestore.Client,
     request_resource_fields: Optional[Iterable[str]] = None,
+    firestore_timeout: float = 15.0  # Allow specifying timeout
 ) -> Dict[str, Any]:
     """
     Authorize API request based on user status and OPA policies.
     Enhanced with credit check information.
+
+    Args:
+        request: The incoming request
+        db: Firestore client
+        request_resource_fields: Fields being accessed/modified in the request
+        firestore_timeout: Timeout for Firestore operations in seconds
+
+    Returns:
+        Authorization result containing decision details
     """
+    opa_decision = None
     try:
         # Extract fields for both PATCH and POST if not provided
         if not request_resource_fields:
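With the timeout now a parameter, callers can tune it per route. A sketch of how a FastAPI app might invoke this dependency; the route path, placeholder get_db provider, and response shape are illustrative, not part of this package:

from fastapi import APIRouter, Depends, Request
from google.cloud import firestore
from ipulse_shared_core_ftredge.dependencies.authz_for_apis import authorizeAPIRequest

router = APIRouter()

def get_db() -> firestore.Client:
    # Placeholder; a real app would reuse the package's firestore_client dependency
    return firestore.Client()

@router.get("/insights/{insight_id}")
async def read_insight(insight_id: str, request: Request,
                       db: firestore.Client = Depends(get_db)):
    # Denials raise AuthorizationError; OPA/Firestore failures raise ServiceError
    authz = await authorizeAPIRequest(request, db, firestore_timeout=10.0)
    return {"insight_id": insight_id, "opa_decision": authz.get("opa_decision")}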
@@ -180,7 +258,12 @@ async def authorizeAPIRequest(

         # Determine if we need fresh status
         force_fresh = _should_force_fresh_status(request)
-        userstatus, cache_used = await get_userstatus(
+        userstatus, cache_used = await get_userstatus(
+            user_uid,
+            db,
+            force_fresh=force_fresh,
+            timeout=firestore_timeout  # Pass the specified timeout
+        )

         # Prepare authorization input that matches OPA expectations
         # Extract required values from user status
@@ -214,7 +297,13 @@ async def authorizeAPIRequest(
         # Query OPA
         opa_url = f"{os.getenv('OPA_SERVER_URL', 'http://localhost:8181')}{os.getenv('OPA_DECISION_PATH', '/v1/data/http/authz/ingress/decision')}"
         logger.debug(f"Attempting to connect to OPA at: {opa_url}")
-
+
+        # Debug: Print raw JSON payload to identify any potential issues
+        try:
+            payload_json = json.dumps({"input": json_safe_authz_input})
+            logger.debug(f"OPA Request JSON payload: {payload_json}")
+        except Exception as json_err:
+            logger.error(f"Error serializing OPA request payload: {json_err}")

         async with httpx.AsyncClient() as client:
             try:
@@ -236,20 +325,35 @@ async def authorizeAPIRequest(
                 result = response.json()
                 logger.debug(f"Parsed OPA response: {result}")

-
+                # Handle unusual OPA response formats
+                # Try to find "decision" field as an alternative
+                if "result" in result:
+                    opa_decision = result["result"]
+                else:
+                    # If we still don't have a result after all attempts, use default structure
+                    logger.warning(f"OPA response missing 'result' field, using default")
+                    raise HTTPException(
+                        status_code=500,
+                        detail="Authorization service error: OPA response format unexpected"
+                    )
+
+                # Extract key fields from result with better default handling
+                allow = opa_decision.get("allow", False)
+
+                # Handle authorization denial
+                if not allow:
                     logger.error(f"Authorization denied: {result}")
                     raise AuthorizationError(
                         action=f"{request.method} {request.url.path}",
                         additional_info={
                             "user_uid": user_uid,
                             "resource_fields": request_resource_fields,
-                            "opa_decision":
+                            "opa_decision": opa_decision,  # Include the full OPA decision result
+                            # Include the raw result if it's different from the processed decision
+                            "raw_opa_response": result if result != {"result": opa_decision} else None
                         }
                     )

-                # Extract credit check information from the OPA response
-                credit_check = result.get("result", {}).get("credit_check", {})
-
             except httpx.RequestError as e:
                 logger.error(f"Failed to connect to OPA: {str(e)}")
                 raise ServiceError(
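The code above expects OPA to wrap its decision in a "result" envelope; the keys read here and in the removed lines are allow, allow_all_fields, allowed_fields, and credit_check. An illustrative envelope (values made up; the credit_check payload is project-specific):

opa_response = {
    "result": {
        "allow": True,
        "allow_all_fields": False,
        "allowed_fields": ["title", "summary"],
        "credit_check": {},  # project-specific payload
    }
}
opa_decision = opa_response["result"]  # what the handler stores and returns
assert opa_decision.get("allow", False)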
@@ -267,9 +371,7 @@ async def authorizeAPIRequest(
             "used_cached_status": cache_used,
             "required_fresh_status": force_fresh,
             "status_retrieved_at": datetime.now(timezone.utc).isoformat(),
-            "
-            "allow_all_fields": result.get("result", {}).get("allow_all_fields", False),
-            "allowed_fields": result.get("result", {}).get("allowed_fields", [])
+            "opa_decision": opa_decision
         }

     except (AuthorizationError, ResourceNotFoundError):
ipulse_shared_core_ftredge/models/base_data_model.py
@@ -1,4 +1,5 @@
 from datetime import datetime, timezone
+from typing import Any
 from typing import ClassVar
 from pydantic import BaseModel, Field, ConfigDict, field_validator
 import dateutil.parser
@@ -32,10 +33,23 @@ class BaseDataModel(BaseModel):

     @field_validator('created_at', 'updated_at', mode='before')
     @classmethod
-    def parse_datetime(cls, v:
-        if isinstance(v, datetime):
-            return v
-
-
-
-
+    def parse_datetime(cls, v: Any) -> datetime:
+        if isinstance(v, datetime):  # If Firestore already gave a datetime object
+            return v  # Just use it, no parsing needed
+        if isinstance(v, str):  # If it's a string (e.g. from an API request, not Firestore direct)
+            try:
+                return dateutil.parser.isoparse(v)
+            except (TypeError, ValueError) as e:
+                raise ValueError(f"Invalid datetime string format: {v} - {e}")
+        # Firestore might send google.api_core.datetime_helpers.DatetimeWithNanoseconds
+        # which is a subclass of datetime.datetime, so isinstance(v, datetime) should catch it.
+        # If for some reason it's a different type not caught by isinstance(v, datetime)
+        # but has isoformat(), perhaps try that, but it's unlikely with current Firestore client.
+        # For example, if v is some custom timestamp object from an older library:
+        if hasattr(v, 'isoformat'):  # Fallback for unknown datetime-like objects
+            try:
+                return dateutil.parser.isoparse(v.isoformat())
+            except Exception as e:
+                raise ValueError(f"Could not parse datetime-like object: {v} - {e}")
+
+        raise ValueError(f"Unsupported type for datetime parsing: {type(v)} value: {v}")
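The validator accepts either native datetimes (including Firestore's DatetimeWithNanoseconds subclass) or ISO-8601 strings. A small sketch, assuming a hypothetical subclass and that created_at/updated_at are ordinary model fields, as the validator decorator implies:

from datetime import datetime, timezone
from ipulse_shared_core_ftredge.models.base_data_model import BaseDataModel

class Note(BaseDataModel):  # hypothetical subclass for illustration
    title: str

note = Note(
    title="hello",
    created_at="2025-01-01T00:00:00+00:00",  # string -> parsed via dateutil.isoparse
    updated_at=datetime.now(timezone.utc),   # datetime -> passed through unchanged
)
assert isinstance(note.created_at, datetime)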
ipulse_shared_core_ftredge/models/user_status.py
@@ -51,6 +51,10 @@ class UserStatus(BaseDataModel):
     DOMAIN: ClassVar[str] = "_".join(list_as_lower_strings(Layer.PULSE_APP, Module.CORE.name, Subject.USER.name))
     OBJ_REF: ClassVar[str] = "userstatus"

+    # Centralized collection name and document ID prefix
+    COLLECTION_NAME: ClassVar[str] = "papp_core_user_userstatuss"
+
+
     # System-managed fields
     schema_version: float = Field(
         default=VERSION,
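Centralizing these constants lets the authorization layer and the new CreditService derive the same Firestore location instead of hard-coding strings. The composition both now use (the UID is illustrative):

from ipulse_shared_core_ftredge.models.user_status import UserStatus

user_uid = "abc123"                          # illustrative Firebase UID
collection = UserStatus.COLLECTION_NAME      # "papp_core_user_userstatuss"
doc_id = f"{UserStatus.OBJ_REF}_{user_uid}"  # "userstatus_abc123"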
ipulse_shared_core_ftredge/services/__init__.py
@@ -1,6 +1,14 @@
-
+"""Service utilities for shared core."""
+# Import existing components
+from ipulse_shared_core_ftredge.services.base_service_exceptions import (
+    BaseServiceException, ServiceError, ValidationError, ResourceNotFoundError, AuthorizationError
+)
+from ipulse_shared_core_ftredge.services.servicemon import Servicemon
+from ipulse_shared_core_ftredge.services.base_firestore_service import BaseFirestoreService
+from ipulse_shared_core_ftredge.services.cache_aware_firestore_service import CacheAwareFirestoreService

-
-
-
-
+__all__ = [
+    'AuthorizationError', 'BaseServiceException', 'ServiceError', 'ValidationError',
+    'ResourceNotFoundError', 'BaseFirestoreService',
+    'CacheAwareFirestoreService'
+]
ipulse_shared_core_ftredge/services/base_firestore_service.py
@@ -1,6 +1,7 @@
 """ Base class for Firestore services with common CRUD operations """
-from typing import Dict, Any, List, TypeVar, Generic
+from typing import Dict, Any, List, TypeVar, Generic, Optional
 import logging
+import time
 from datetime import datetime, timezone
 from pydantic import BaseModel
 from google.cloud import firestore
@@ -11,11 +12,20 @@ T = TypeVar('T', bound=BaseModel)
 class BaseFirestoreService(Generic[T]):
     """Base class for Firestore services with common CRUD operations"""

-    def __init__(
+    def __init__(
+        self,
+        db: firestore.Client,
+        collection_name: str,
+        resource_type: str,
+        logger: logging.Logger,
+        timeout: float = 15.0  # Default to 15 seconds, but allow override
+    ):
         self.db = db
         self.collection_name = collection_name
         self.resource_type = resource_type
         self.logger = logger
+        self.timeout = timeout  # Store the timeout as an instance attribute
+        self.logger.info(f"Initialized {self.resource_type} service with timeout={timeout}s")

     async def create_document(self, doc_id: str, data: T, creator_uid: str) -> Dict[str, Any]:
         """Standard create method with audit fields"""
@@ -32,7 +42,8 @@ class BaseFirestoreService(Generic[T]):
         })

         doc_ref = self.db.collection(self.collection_name).document(doc_id)
-
+        # Apply timeout to the set operation
+        doc_ref.set(doc_data, timeout=self.timeout)

         self.logger.info(f"Created {self.resource_type}: {doc_id}")
         return doc_data
@@ -66,7 +77,8 @@ class BaseFirestoreService(Generic[T]):
             batch.set(doc_ref, doc_data)
             created_docs.append(doc_data)

-
+        # Apply timeout to the commit operation
+        batch.commit(timeout=self.timeout)
         self.logger.info(f"Created {len(documents)} {self.resource_type}s in batch")
         return created_docs

@@ -81,24 +93,48 @@ class BaseFirestoreService(Generic[T]):

     async def get_document(self, doc_id: str) -> Dict[str, Any]:
         """Get a document by ID with standardized error handling"""
-
-
+        self.logger.debug(f"Getting {self.resource_type} document: {doc_id} with timeout={self.timeout}s")
+        start_time = time.time()

-
-
+        try:
+            doc_ref = self.db.collection(self.collection_name).document(doc_id)
+
+            # Apply timeout to the get operation
+            doc = doc_ref.get(timeout=self.timeout)
+
+            elapsed = (time.time() - start_time) * 1000
+            self.logger.debug(f"Firestore get for {doc_id} completed in {elapsed:.2f}ms")
+
+            if not doc.exists:
+                self.logger.warning(f"Document {doc_id} not found in {self.collection_name}")
+                raise ResourceNotFoundError(
+                    resource_type=self.resource_type,
+                    resource_id=doc_id,
+                    additional_info={"collection": self.collection_name}
+                )
+
+            return doc.to_dict()
+
+        except ResourceNotFoundError:
+            raise
+        except Exception as e:
+            elapsed = (time.time() - start_time) * 1000
+            self.logger.error(f"Error getting document {doc_id} after {elapsed:.2f}ms: {str(e)}", exc_info=True)
+            raise ServiceError(
+                operation=f"retrieving {self.resource_type}",
+                error=e,
                resource_type=self.resource_type,
                resource_id=doc_id,
-                additional_info={"collection": self.collection_name}
-            )
-
-        return doc.to_dict()
+                additional_info={"collection": self.collection_name, "timeout": self.timeout}
+            ) from e

     async def update_document(self, doc_id: str, update_data: Dict[str, Any], updater_uid: str) -> Dict[str, Any]:
         """Standard update method with validation and audit fields"""
         try:
             doc_ref = self.db.collection(self.collection_name).document(doc_id)

-
+            # Apply timeout to the get operation
+            if not doc_ref.get(timeout=self.timeout).exists:
                 raise ResourceNotFoundError(
                     resource_type=self.resource_type,
                     resource_id=doc_id,
@@ -113,8 +149,10 @@ class BaseFirestoreService(Generic[T]):
                 'updated_by': updater_uid
             })

-
-
+            # Apply timeout to the update operation
+            doc_ref.update(valid_fields, timeout=self.timeout)
+            # Apply timeout to the get operation
+            return doc_ref.get(timeout=self.timeout).to_dict()

         except (ResourceNotFoundError, ValidationError):
             raise
@@ -131,13 +169,15 @@ class BaseFirestoreService(Generic[T]):
         """Standard delete method"""
         try:
             doc_ref = self.db.collection(self.collection_name).document(doc_id)
-
+            # Apply timeout to the get operation
+            if not doc_ref.get(timeout=self.timeout).exists:
                 raise ResourceNotFoundError(
                     resource_type=self.resource_type,
                     resource_id=doc_id
                 )

-
+            # Apply timeout to the delete operation
+            doc_ref.delete(timeout=self.timeout)
             self.logger.info(f"Deleted {self.resource_type}: {doc_id}")

         except ResourceNotFoundError:
@@ -151,6 +191,44 @@ class BaseFirestoreService(Generic[T]):
                 resource_id=doc_id
             ) from e

+    # Add query method with timeout
+    async def query_documents(
+        self,
+        filters: Optional[List[tuple]] = None,
+        limit: Optional[int] = None,
+        order_by: Optional[tuple] = None
+    ) -> List[Dict[str, Any]]:
+        """Query documents with filters, limit, and ordering"""
+        try:
+            # Start with the collection reference
+            query = self.db.collection(self.collection_name)
+
+            # Apply filters if provided
+            if filters:
+                for field, op, value in filters:
+                    query = query.where(field=field, op_string=op, value=value)
+
+            # Apply ordering if provided
+            if order_by:
+                field, direction = order_by
+                query = query.order_by(field, direction=direction)
+
+            # Apply limit if provided
+            if limit:
+                query = query.limit(limit)
+
+            # Execute query with timeout
+            docs = query.stream(timeout=self.timeout)
+            return [doc.to_dict() for doc in docs]
+
+        except Exception as e:
+            self.logger.error(f"Error querying {self.resource_type}: {e}", exc_info=True)
+            raise ServiceError(
+                operation=f"querying {self.resource_type}",
+                error=e,
+                resource_type=self.resource_type
+            ) from e
+
     def _validate_update_fields(self, update_data: Dict[str, Any]) -> Dict[str, Any]:
         """Centralized update fields validation"""
         if not isinstance(update_data, dict):
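A sketch of calling the new query method from async code; the collection, filter fields, and direction string are illustrative (the direction is whatever Firestore's order_by accepts, e.g. "DESCENDING"):

import logging
from google.cloud import firestore
from ipulse_shared_core_ftredge.services import BaseFirestoreService

async def recent_userstatuses(db: firestore.Client):
    service = BaseFirestoreService(
        db=db,
        collection_name="papp_core_user_userstatuss",  # illustrative target
        resource_type="userstatus",
        logger=logging.getLogger(__name__),
        timeout=10.0,
    )
    return await service.query_documents(
        filters=[("extra_insight_credits", ">", 0)],
        order_by=("updated_at", "DESCENDING"),
        limit=20,
    )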
ipulse_shared_core_ftredge/services/cache_aware_firestore_service.py
@@ -0,0 +1,171 @@
+"""Base service with built-in cache awareness for Firestore operations."""
+
+from typing import Dict, Any, List, Optional, TypeVar, Generic
+from google.cloud import firestore
+from pydantic import BaseModel
+import logging
+from ipulse_shared_core_ftredge.cache.shared_cache import SharedCache
+from ipulse_shared_core_ftredge.services import BaseFirestoreService
+
+T = TypeVar('T', bound=BaseModel)
+
+class CacheAwareFirestoreService(BaseFirestoreService, Generic[T]):
+    """
+    Base service class that integrates caching with Firestore operations.
+    This allows services to inherit cache-aware CRUD methods without reimplementing them.
+    """
+
+    def __init__(
+        self,
+        db: firestore.Client,
+        collection_name: str,
+        resource_type: str,
+        logger: logging.Logger,
+        document_cache: Optional[SharedCache] = None,
+        collection_cache: Optional[SharedCache] = None,
+        timeout: float = 15.0
+    ):
+        """
+        Initialize the service with optional cache instances.
+
+        Args:
+            db: Firestore client
+            collection_name: Firestore collection name
+            resource_type: Resource type for error messages
+            logger: Logger instance
+            document_cache: Cache for individual documents (optional)
+            collection_cache: Cache for collection-level queries (optional)
+            timeout: Firestore operation timeout in seconds
+        """
+        super().__init__(
+            db=db,
+            collection_name=collection_name,
+            resource_type=resource_type,
+            logger=logger,
+            timeout=timeout
+        )
+        self.document_cache = document_cache
+        self.collection_cache = collection_cache
+
+        # Log cache configuration
+        if document_cache:
+            self.logger.info(f"Document cache enabled for {resource_type}: {document_cache.name}")
+        if collection_cache:
+            self.logger.info(f"Collection cache enabled for {resource_type}: {collection_cache.name}")
+
+    async def create_document(self, doc_id: str, data: T, creator_uid: str) -> Dict[str, Any]:
+        """Create a document and invalidate relevant caches."""
+        result = await super().create_document(doc_id, data, creator_uid)
+
+        # Invalidate document cache if it exists
+        self._invalidate_document_cache(doc_id)
+
+        # Invalidate collection cache if it exists
+        self._invalidate_collection_cache()
+
+        return result
+
+    async def update_document(self, doc_id: str, update_data: Dict[str, Any], updater_uid: str) -> Dict[str, Any]:
+        """Update a document and invalidate relevant caches."""
+        result = await super().update_document(doc_id, update_data, updater_uid)
+
+        # Invalidate document cache if it exists
+        self._invalidate_document_cache(doc_id)
+
+        # Invalidate collection cache if it exists
+        self._invalidate_collection_cache()
+
+        return result
+
+    async def delete_document(self, doc_id: str, deleter_uid: Optional[str] = None) -> None:
+        """Delete a document and invalidate relevant caches."""
+        # Invalidate caches before deletion to handle potential failures
+        self._invalidate_document_cache(doc_id)
+        self._invalidate_collection_cache()
+
+        # Delete the document
+        await super().delete_document(doc_id)
+
+    async def get_document(self, doc_id: str) -> Dict[str, Any]:
+        """
+        Get a document by ID with caching if available.
+
+        Args:
+            doc_id: The document ID to fetch
+
+        Returns:
+            The document data
+        """
+        # Check document cache first if available
+        if self.document_cache:
+            cached_data = self.document_cache.get(doc_id)
+            if cached_data is not None:
+                self.logger.debug(f"Cache hit for document {doc_id}")
+                return cached_data
+
+        # Cache miss or no cache configured, fetch from Firestore
+        doc_data = await super().get_document(doc_id)
+
+        # Store in cache if available
+        if self.document_cache and doc_data:
+            # Make sure ID is included in the cached data
+            if 'id' not in doc_data:
+                doc_data['id'] = doc_id
+            self.document_cache.set(doc_id, doc_data)
+            self.logger.debug(f"Cached document {doc_id}")
+
+        return doc_data
+
+    async def get_all_documents(self, cache_key: str = "all_documents") -> List[Dict[str, Any]]:
+        """
+        Get all documents in the collection with caching.
+
+        Args:
+            cache_key: The key to use for caching the full collection
+
+        Returns:
+            List of all documents in the collection
+        """
+        # Check collection cache first if available
+        if self.collection_cache:
+            cached_data = self.collection_cache.get(cache_key)
+            if cached_data is not None:
+                self.logger.debug(f"Cache hit for collection query: {cache_key}")
+                return cached_data
+
+        # Cache miss or no cache configured, fetch from Firestore
+        query = self.db.collection(self.collection_name).stream(timeout=self.timeout)
+        documents = []
+
+        for doc in query:
+            doc_data = doc.to_dict()
+
+            # Make sure ID is included in the data
+            if 'id' not in doc_data:
+                doc_data['id'] = doc.id
+
+            # Also update the document cache if configured
+            if self.document_cache:
+                self.document_cache.set(doc.id, doc_data)
+
+            documents.append(doc_data)
+
+        # Store in collection cache if available
+        if self.collection_cache:
+            self.collection_cache.set(cache_key, documents)
+            self.logger.debug(f"Cached collection query result: {cache_key} with {len(documents)} documents")
+
+        return documents
+
+    def _invalidate_document_cache(self, doc_id: str) -> None:
+        """Invalidate the document cache for a specific document ID."""
+        if self.document_cache:
+            self.document_cache.invalidate(doc_id)
+            self.logger.debug(f"Invalidated document cache for {doc_id}")
+
+    def _invalidate_collection_cache(self, cache_key: str = "all_documents") -> None:
+        """Invalidate the collection cache."""
+        if self.collection_cache:
+            # For single key collection cache
+            self.collection_cache.invalidate(cache_key)
+            self.logger.debug(f"Invalidated collection cache: {cache_key}")
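Wiring the pieces together: a sketch that pairs the service with two SharedCache instances, one per document and one for whole-collection queries; the cache names, TTLs, and collection are illustrative:

import logging
from google.cloud import firestore
from ipulse_shared_core_ftredge.cache.shared_cache import SharedCache
from ipulse_shared_core_ftredge.services import CacheAwareFirestoreService

logger = logging.getLogger(__name__)

service = CacheAwareFirestoreService(
    db=firestore.Client(),
    collection_name="papp_core_user_userstatuss",  # illustrative
    resource_type="userstatus",
    logger=logger,
    document_cache=SharedCache(name="userstatus_docs", ttl=60.0, logger=logger),
    collection_cache=SharedCache(name="userstatus_all", ttl=300.0, logger=logger),
    timeout=10.0,
)
# get_document() now checks the document cache before Firestore, and every
# create/update/delete invalidates both caches.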
ipulse_shared_core_ftredge/services/credit_service.py
@@ -0,0 +1,270 @@
+"""Service for managing credit operations in a generic way."""
+import logging
+from typing import Dict, Any, Optional, Tuple
+from datetime import datetime, timezone
+from google.cloud import firestore
+from ipulse_shared_core_ftredge.services import ServiceError, ResourceNotFoundError, ValidationError
+from ipulse_shared_core_ftredge.models.user_status import UserStatus
+
+# Default Firestore timeout if not provided by the consuming application
+DEFAULT_FIRESTORE_TIMEOUT = 15.0
+
+class CreditService:
+    """
+    Service class for credit operations.
+    Designed to be project-agnostic and directly uses UserStatus model constants.
+    """
+
+    def __init__(
+        self,
+        db: firestore.Client,
+        logger: Optional[logging.Logger] = None,
+        firestore_timeout: float = DEFAULT_FIRESTORE_TIMEOUT
+    ):
+        """
+        Initialize the credit service.
+
+        Args:
+            db: Firestore client.
+            logger: Optional logger instance. Defaults to a new logger for this module.
+            firestore_timeout: Timeout for Firestore operations in seconds.
+        """
+        self.db = db
+        # Use UserStatus constants directly
+        self.users_status_collection_name = UserStatus.COLLECTION_NAME
+        self.user_status_doc_prefix = f"{UserStatus.OBJ_REF}_"  # Append underscore to OBJ_REF
+        self.logger = logger or logging.getLogger(__name__)
+        self.timeout = firestore_timeout
+
+        self.logger.info(
+            f"CreditService initialized using UserStatus constants. Collection: {self.users_status_collection_name}, "
+            f"Doc Prefix: {self.user_status_doc_prefix}, Timeout: {self.timeout}s"
+        )
+
+    async def verify_credits(
+        self,
+        user_uid: str,
+        required_credits_for_resource: float,
+        pre_fetched_user_credits: Optional[Dict[str, float]] = None
+    ) -> Tuple[bool, Dict[str, Any]]:
+        """
+        Verify if a user has enough credits for an operation.
+
+        Args:
+            user_uid: The user's UID.
+            required_credits_for_resource: The number of credits required for the operation.
+            pre_fetched_user_credits: Optional dict with pre-fetched credit info.
+                (keys: 'sbscrptn_based_insight_credits', 'extra_insight_credits')
+
+        Returns:
+            Tuple of (has_enough_credits, user_status_data) where user_status_data
+            will be a dict with keys 'sbscrptn_based_insight_credits' and 'extra_insight_credits'.
+
+        Raises:
+            ValidationError: If required_credits_for_resource is None (pricing not properly configured).
+        """
+        self.logger.info(
+            f"verify_credits called for user {user_uid}, "
+            f"required_credits={required_credits_for_resource}, "
+            f"pre_fetched_credits={pre_fetched_user_credits}"
+        )
+
+        if required_credits_for_resource is None:
+            self.logger.error(f"Credit cost is None for user {user_uid}, pricing not properly configured")
+            raise ValidationError(
+                resource_type="credit_cost",
+                detail="Credit cost is not configured for this resource",
+                resource_id=None,  # Resource ID might not be known here, or could be passed
+                additional_info={"user_uid": user_uid}
+            )
+
+        if required_credits_for_resource <= 0:
+            self.logger.info(f"No credits required for user {user_uid}, bypassing credit verification")
+            return True, {"sbscrptn_based_insight_credits": 0, "extra_insight_credits": 0}
+
+        if pre_fetched_user_credits is not None:
+            self.logger.info(f"Using pre-fetched credit info for user {user_uid}")
+            subscription_credits = pre_fetched_user_credits.get("sbscrptn_based_insight_credits", 0)
+            extra_credits = pre_fetched_user_credits.get("extra_insight_credits", 0)
+            total_credits = subscription_credits + extra_credits
+
+            self.logger.info(
+                f"User {user_uid} has {total_credits} total pre-fetched credits "
+                f"(subscription: {subscription_credits}, extra: {extra_credits})"
+            )
+
+            userstatus_data_to_return = {
+                "sbscrptn_based_insight_credits": subscription_credits,
+                "extra_insight_credits": extra_credits
+            }
+
+            has_enough_credits = total_credits >= required_credits_for_resource
+            return has_enough_credits, userstatus_data_to_return
+
+        try:
+            self.logger.info(
+                f"Fetching user status from Firestore for user {user_uid} (collection: {self.users_status_collection_name})"
+            )
+            full_userstatus_doc = await self._get_userstatus(user_uid)
+
+            subscription_credits = full_userstatus_doc.get("sbscrptn_based_insight_credits", 0)
+            extra_credits = full_userstatus_doc.get("extra_insight_credits", 0)
+            total_credits = subscription_credits + extra_credits
+
+            self.logger.info(
+                f"User {user_uid} has {total_credits} total credits from Firestore "
+                f"(subscription: {subscription_credits}, extra: {extra_credits})"
+            )
+
+            has_enough_credits = total_credits >= required_credits_for_resource
+
+            userstatus_data_to_return = {
+                "sbscrptn_based_insight_credits": subscription_credits,
+                "extra_insight_credits": extra_credits
+            }
+
+            return has_enough_credits, userstatus_data_to_return
+
+        except ResourceNotFoundError:
+            self.logger.warning(f"User status not found for {user_uid} in {self.users_status_collection_name}. Assuming no credits.")
+            return False, {"sbscrptn_based_insight_credits": 0, "extra_insight_credits": 0}
+        except Exception as e:
+            self.logger.error(f"Error verifying credits for user {user_uid}: {str(e)}")
+            raise ServiceError(
+                operation="verifying credits",
+                error=e,
+                resource_type="user_credits",
+                resource_id=user_uid,
+                additional_info={"credits_to_charge": required_credits_for_resource}
+            ) from e
+
+    async def charge_credits(self, user_uid: str, credits_to_charge: Optional[float], operation_details: str) -> bool:
+        """
+        Charge a user's credits for an operation.
+
+        Args:
+            user_uid: The user's UID.
+            credits_to_charge: The number of credits to charge.
+            operation_details: Details about the operation (for logging).
+
+        Returns:
+            Whether the charging was successful.
+
+        Raises:
+            ValidationError: If credits_to_charge is None (pricing not properly configured).
+        """
+        if credits_to_charge is None:
+            self.logger.error(f"Credit cost is None for user {user_uid} (charge_credits), pricing not properly configured")
+            raise ValidationError(
+                resource_type="credit_cost",
+                detail="Credit cost is not configured for this resource (charge_credits)",
+                resource_id=None,
+                additional_info={"user_uid": user_uid}
+            )
+
+        if credits_to_charge == 0:
+            self.logger.info(f"No credits to charge for user {user_uid}, operation: {operation_details}")
+            return True
+
+        try:
+            userstatus_id = f"{self.user_status_doc_prefix}{user_uid}"
+            user_ref = self.db.collection(self.users_status_collection_name).document(userstatus_id)
+
+            transaction = self.db.transaction()
+
+            @firestore.transactional
+            def update_credits_transaction(transaction_obj, current_user_ref):
+                user_doc = current_user_ref.get(transaction=transaction_obj)
+                if not user_doc.exists:
+                    self.logger.warning(
+                        f"Cannot charge credits - user status not found for {user_uid} in {self.users_status_collection_name}"
+                    )
+                    return False
+
+                userstatus = user_doc.to_dict()
+
+                subscription_credits = userstatus.get("sbscrptn_based_insight_credits", 0)
+                extra_credits = userstatus.get("extra_insight_credits", 0)
+                total_credits = subscription_credits + extra_credits
+
+                if total_credits < credits_to_charge:
+                    self.logger.warning(
+                        f"Insufficient credits for user {user_uid} during transaction: "
+                        f"has {total_credits}, needs {credits_to_charge}"
+                    )
+                    return False
+
+                subscription_credits_to_charge = min(subscription_credits, credits_to_charge)
+                extra_credits_to_charge = credits_to_charge - subscription_credits_to_charge
+
+                update_data = {
+                    "updated_at": datetime.now(timezone.utc).isoformat(),
+                    "updated_by": "credit_service"  # Consider making this configurable or more generic
+                }
+
+                if subscription_credits_to_charge > 0:
+                    update_data["sbscrptn_based_insight_credits"] = firestore.Increment(-subscription_credits_to_charge)
+                    update_data["sbscrptn_based_insight_credits_updtd_on"] = datetime.now(timezone.utc).isoformat()
+
+                if extra_credits_to_charge > 0:
+                    update_data["extra_insight_credits"] = firestore.Increment(-extra_credits_to_charge)
+                    update_data["extra_insight_credits_updtd_on"] = datetime.now(timezone.utc).isoformat()
+
+                transaction_obj.update(current_user_ref, update_data)
+                return True
+
+            success = update_credits_transaction(transaction, user_ref)
+
+            if success:
+                self.logger.info(
+                    f"Successfully charged {credits_to_charge} credits for user {user_uid}. "
+                    f"Operation: {operation_details}"
+                )
+            else:
+                self.logger.warning(
+                    f"Failed to charge {credits_to_charge} credits for user {user_uid} (transaction outcome). "
+                    f"Operation: {operation_details}"
+                )
+
+            return success
+
+        except Exception as e:
+            self.logger.error(f"Error charging credits for user {user_uid}: {str(e)}")
+            raise ServiceError(
+                operation="charging credits",
+                error=e,
+                resource_type="user_credits",
+                resource_id=user_uid,
+                additional_info={"credits_to_charge": credits_to_charge}
+            ) from e
+
+    async def _get_userstatus(self, user_uid: str) -> Dict[str, Any]:
+        """Get a user's status document."""
+        try:
+            userstatus_id = f"{self.user_status_doc_prefix}{user_uid}"
+            doc_ref = self.db.collection(self.users_status_collection_name).document(userstatus_id)
+
+            # Using the timeout value set during initialization
+            doc = await doc_ref.get(timeout=self.timeout)
+
+            if not doc.exists:
+                raise ResourceNotFoundError(
+                    resource_type="user_status",  # Generic resource type
+                    resource_id=userstatus_id,
+                    additional_info={"collection": self.users_status_collection_name}
+                )
+
+            return doc.to_dict()
+
+        except ResourceNotFoundError:
+            raise
+        except Exception as e:  # Catch generic Exception to handle potential timeout errors from Firestore client
+            self.logger.error(f"Error getting user status for {user_uid} from {self.users_status_collection_name}: {str(e)}")
+            raise ServiceError(
+                operation="getting user status",
+                error=e,
+                resource_type="user_status",
+                resource_id=user_uid,
+                additional_info={"collection": self.users_status_collection_name}
+            ) from e
+
{ipulse_shared_core_ftredge-11.1.1.dist-info → ipulse_shared_core_ftredge-13.0.1.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ipulse_shared_core_ftredge
-Version: 11.1.1
+Version: 13.0.1
 Summary: Shared Core models and Logger util for the Pulse platform project. Using AI for financial advisory and investment management.
 Home-page: https://github.com/TheFutureEdge/ipulse_shared_core
 Author: Russlan Ramdowar
@@ -13,7 +13,7 @@ Requires-Dist: pydantic[email]~=2.5
 Requires-Dist: python-dateutil~=2.8
 Requires-Dist: fastapi~=0.115.8
 Requires-Dist: pytest
-Requires-Dist: ipulse_shared_base_ftredge
+Requires-Dist: ipulse_shared_base_ftredge==6.5.1
 Dynamic: author
 Dynamic: classifier
 Dynamic: home-page
{ipulse_shared_core_ftredge-11.1.1.dist-info → ipulse_shared_core_ftredge-13.0.1.dist-info}/RECORD
RENAMED
@@ -1,28 +1,32 @@
-ipulse_shared_core_ftredge/__init__.py,sha256=
+ipulse_shared_core_ftredge/__init__.py,sha256=Bj1WgZq6EmiZeFC-3gYludUpoWgsUrRq1NME5nMN22Q,501
+ipulse_shared_core_ftredge/cache/__init__.py,sha256=i2fPojmZiBwAoY5ovnnnME9USl4bi8MRPYkAgEfACfI,136
+ipulse_shared_core_ftredge/cache/shared_cache.py,sha256=pDHJuMRU6zkqbykaK2ldpyVmUHLa0TAI4Xu3P9M-0B0,9454
 ipulse_shared_core_ftredge/dependencies/__init__.py,sha256=HGsR8HUguKTfjz_BorCILS4izX8CAjG-apE0kIPE0Yo,68
 ipulse_shared_core_ftredge/dependencies/auth_firebase_token_validation.py,sha256=EFWyhoVOI0tGYOWqN5St4JNIy4cMwpxeBhKdjOwEfbg,1888
 ipulse_shared_core_ftredge/dependencies/auth_protected_router.py,sha256=em5D5tE7OkgZmuCtYCKuUAnIZCgRJhCF8Ye5QmtGWlk,1807
-ipulse_shared_core_ftredge/dependencies/authz_for_apis.py,sha256=
+ipulse_shared_core_ftredge/dependencies/authz_for_apis.py,sha256=e_UZ_jY1xhC3XX8r3xffb06t6buLfXT2SQf46-DXVoo,15977
 ipulse_shared_core_ftredge/dependencies/firestore_client.py,sha256=VbTb121nsc9EZPd1RDEsHBLW5pIiVw6Wdo2JFL4afMg,714
 ipulse_shared_core_ftredge/models/__init__.py,sha256=cf2P65BXRQoOrcuvlbmT6yW50U7wyj8ZNvHTuTvlETo,344
 ipulse_shared_core_ftredge/models/base_api_response.py,sha256=WOHxtv_FEk5MKzXORgIsp-sKP4O5WJCgrJMI6tYph4U,1880
-ipulse_shared_core_ftredge/models/base_data_model.py,sha256=
+ipulse_shared_core_ftredge/models/base_data_model.py,sha256=frvUDiKnjMGPXIQX_qdpNgGcm3SauCth6GiRuabmD5s,2509
 ipulse_shared_core_ftredge/models/organization_profile.py,sha256=OnjsSVcp_LSB65F9Tl9udwNgqMg7gjSpv38eArpVXPc,3668
 ipulse_shared_core_ftredge/models/subscription.py,sha256=bu6BtyDQ4jDkK3PLY97dZ_A3cmjzZahTkuaFOFybdxI,6892
 ipulse_shared_core_ftredge/models/user_auth.py,sha256=YgCeK0uJ-JOkPavwzogl4wGC3RpA8PVfl-5MPS4Kxhk,432
 ipulse_shared_core_ftredge/models/user_profile.py,sha256=5cTTZa7pMkgKCsLgTPpvz_aPn-ZyQcJ3xSEtu3jq3HE,4138
 ipulse_shared_core_ftredge/models/user_profile_update.py,sha256=3BqAAqnVKXPKhAcfV_aOERe8GyIkX0NU_LJcQa02aLw,1319
-ipulse_shared_core_ftredge/models/user_status.py,sha256=
-ipulse_shared_core_ftredge/services/__init__.py,sha256=
-ipulse_shared_core_ftredge/services/base_firestore_service.py,sha256=
+ipulse_shared_core_ftredge/models/user_status.py,sha256=rAx8l5GrB8TN7RvZ1eIMskphRxdYqO1OZ8NnaIxUUW8,23660
+ipulse_shared_core_ftredge/services/__init__.py,sha256=iwbBlviqOxVPmJC9tRsOyU6zzQlAn7Do0Gc3WKRi4Ao,697
+ipulse_shared_core_ftredge/services/base_firestore_service.py,sha256=n1lymQEFcu6zHkdscNNCNIzTIVmja8cBtNy2yi5vfTE,9817
 ipulse_shared_core_ftredge/services/base_service_exceptions.py,sha256=Bi0neeMY0YncWDeqUavu5JUslkjJ6QcDVRU32Ipjc08,4294
+ipulse_shared_core_ftredge/services/cache_aware_firestore_service.py,sha256=DLNS1BegJUPSHs41j5jkP3g6w2tGSDUIurIWjI__xf4,6486
+ipulse_shared_core_ftredge/services/credit_service.py,sha256=C07rOr58LsK4udznu64mQFUSBxY8AdfRaxw_9Pw_AOI,12038
 ipulse_shared_core_ftredge/services/fastapiservicemon.py,sha256=27clTZXH32mbju8o-HLO_8VrmugmpXwHLuX-OOoIAew,5308
 ipulse_shared_core_ftredge/services/servicemon.py,sha256=wWhsLwU1_07emaEyCNziZA1bDQVLxcfvQj0OseTLSTI,7969
 ipulse_shared_core_ftredge/utils/__init__.py,sha256=JnxUb8I2MRjJC7rBPXSrpwBIQDEOku5O9JsiTi3oun8,56
 ipulse_shared_core_ftredge/utils/custom_json_encoder.py,sha256=DblQLD0KOSNDyQ58wQRogBrShIXzPIZUw_oGOBATnJY,1366
 ipulse_shared_core_ftredge/utils/json_encoder.py,sha256=QkcaFneVv3-q-s__Dz4OiUWYnM6jgHDJrDMdPv09RCA,2093
-ipulse_shared_core_ftredge-
-ipulse_shared_core_ftredge-
-ipulse_shared_core_ftredge-
-ipulse_shared_core_ftredge-
-ipulse_shared_core_ftredge-
+ipulse_shared_core_ftredge-13.0.1.dist-info/licenses/LICENCE,sha256=YBtYAXNqCCOo9Mr2hfkbSPAM9CeAr2j1VZBSwQTrNwE,1060
+ipulse_shared_core_ftredge-13.0.1.dist-info/METADATA,sha256=OYQw5kTq7LHJinYen7EVx-NryTit726UoMoNm6PxtMA,803
+ipulse_shared_core_ftredge-13.0.1.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+ipulse_shared_core_ftredge-13.0.1.dist-info/top_level.txt,sha256=8sgYrptpexkA_6_HyGvho26cVFH9kmtGvaK8tHbsGHk,27
+ipulse_shared_core_ftredge-13.0.1.dist-info/RECORD,,
{ipulse_shared_core_ftredge-11.1.1.dist-info → ipulse_shared_core_ftredge-13.0.1.dist-info}/licenses/LICENCE
File without changes
{ipulse_shared_core_ftredge-11.1.1.dist-info → ipulse_shared_core_ftredge-13.0.1.dist-info}/top_level.txt
File without changes