ipulse-shared-core-ftredge 19.0.1-py3-none-any.whl → 20.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ipulse-shared-core-ftredge might be problematic.
- ipulse_shared_core_ftredge/monitoring/__init__.py +2 -0
- ipulse_shared_core_ftredge/monitoring/microservmon.py +50 -7
- ipulse_shared_core_ftredge/monitoring/tracemon.py +320 -0
- {ipulse_shared_core_ftredge-19.0.1.dist-info → ipulse_shared_core_ftredge-20.0.1.dist-info}/METADATA +1 -1
- {ipulse_shared_core_ftredge-19.0.1.dist-info → ipulse_shared_core_ftredge-20.0.1.dist-info}/RECORD +8 -7
- {ipulse_shared_core_ftredge-19.0.1.dist-info → ipulse_shared_core_ftredge-20.0.1.dist-info}/WHEEL +0 -0
- {ipulse_shared_core_ftredge-19.0.1.dist-info → ipulse_shared_core_ftredge-20.0.1.dist-info}/licenses/LICENCE +0 -0
- {ipulse_shared_core_ftredge-19.0.1.dist-info → ipulse_shared_core_ftredge-20.0.1.dist-info}/top_level.txt +0 -0
ipulse_shared_core_ftredge/monitoring/microservmon.py
@@ -125,6 +125,11 @@ class Microservmon:
         """Get the current context stack as a string."""
         return " >> ".join(self._context_stack) if self._context_stack else "root"
 
+    @property
+    def current_trace_id(self) -> Optional[str]:
+        """Get the current trace_id from context stack (first element if present)."""
+        return self._context_stack[0] if self._context_stack else None
+
     @contextmanager
     def context(self, context_name: str):
         """
@@ -140,6 +145,24 @@
         finally:
             self.pop_context()
 
+    @contextmanager
+    def trace(self, description: Optional[str] = None):
+        """
+        Context manager for trace lifecycle management.
+        Automatically starts and ends a trace, managing the context stack.
+
+        Args:
+            description: Optional description of the trace
+
+        Yields:
+            str: The trace_id for the current trace
+        """
+        trace_id = self.start_trace(description)
+        try:
+            yield trace_id
+        finally:
+            self.end_trace(trace_id)
+
     def push_context(self, context: str):
         """Add a context level to the stack."""
         self._context_stack.append(context)
@@ -182,6 +205,9 @@
         self._microservice_metrics["active_traces"] = len(self._active_traces)
         self._microservice_metrics["status_counts"].add_status(ProgressStatus.IN_PROGRESS)
 
+        # Push trace_id as first element in context stack
+        self._context_stack.insert(0, trace_id)
+
         # Log the trace start using StructLog
         msg = description if description else f"Starting trace {trace_id}"
         start_log = StructLog(
@@ -291,18 +317,26 @@
         del self._active_traces[trace_id]
         self._microservice_metrics["active_traces"] = len(self._active_traces)
 
+        # Remove trace_id from context stack if it's the first element
+        if self._context_stack and self._context_stack[0] == trace_id:
+            self._context_stack.pop(0)
+
     def log(self, log: StructLog, trace_id: Optional[str] = None) -> None:
         """
         Log a StructLog message with trace context.
 
         Args:
             log: StructLog instance to log
-            trace_id: Optional trace ID for trace-specific logging
+            trace_id: Optional trace ID for trace-specific logging. If not provided,
+                will use current_trace_id from context stack
         """
+        # Use provided trace_id or infer from context stack
+        effective_trace_id = trace_id or self.current_trace_id
+
         # Calculate elapsed time
         elapsed_ms = None
-        if trace_id and trace_id in self._active_traces:
-            start_time = self._active_traces[trace_id]["start_time"]
+        if effective_trace_id and effective_trace_id in self._active_traces:
+            start_time = self._active_traces[effective_trace_id]["start_time"]
             elapsed_ms = int((time.time() - start_time) * 1000)
         else:
             # Use microservice start time if no trace
@@ -311,9 +345,18 @@
         # Set microservice-specific context on the log
         log.collector_id = self.id
         log.base_context = self.base_context
-
-
-
+
+        # Context with trace_id first, then other context elements
+        if effective_trace_id:
+            context_parts = [effective_trace_id]
+            # Add non-trace_id context elements (skip first element if it's trace_id)
+            other_contexts = [ctx for ctx in self._context_stack if ctx != effective_trace_id]
+            context_parts.extend(other_contexts)
+            log.context = " >> ".join(context_parts)
+        else:
+            log.context = self.current_context
+
+        log.trace_id = effective_trace_id
 
         # Append elapsed time to existing notes
         existing_note = log.note or ""
@@ -321,7 +364,7 @@
         log.note = f"{existing_note}; {elapsed_note}" if existing_note else elapsed_note
 
         # Update metrics for the trace or microservice
-        self._update_counts(log, trace_id)
+        self._update_counts(log, effective_trace_id)
 
         # Write to logger
         self._write_log_to_logger(log)
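Taken together, the microservmon.py changes make trace handling implicit: start_trace() now pushes the trace_id onto the context stack, log() falls back to current_trace_id when no trace_id argument is passed, and the new trace() context manager wraps start_trace()/end_trace(). A minimal usage sketch follows; the Microservmon constructor arguments and the monitoring package export path are assumptions (only the methods visible in this diff are confirmed), and the StructLog fields mirror those used above.

# Usage sketch (hypothetical): constructor arguments and import path are assumed;
# only trace(), context(), current_trace_id and log() come from this diff.
import logging
from ipulse_shared_base_ftredge import (LogLevel, AbstractResource, Action,
                                        ProgressStatus, StructLog)
from ipulse_shared_core_ftredge.monitoring import Microservmon  # assumed export

microservmon = Microservmon(base_context="svc=pricing;env=dev",
                            logger=logging.getLogger("pricing"))

with microservmon.trace("refresh prices") as trace_id:      # starts and ends the trace
    with microservmon.context("load_symbols"):              # pushed after the trace_id element
        microservmon.log(StructLog(
            level=LogLevel.INFO,
            description="Symbols loaded",
            resource=AbstractResource.MICROSERVICE_TRACE,
            action=Action.EXECUTE,
            progress_status=ProgressStatus.IN_PROGRESS,
        ))  # no trace_id argument: log() now falls back to current_trace_id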
ipulse_shared_core_ftredge/monitoring/tracemon.py
@@ -0,0 +1,320 @@
+"""
+TraceMon - Per-trace monitoring and logging
+==========================================
+
+TraceMon is designed for complete trace isolation. Each request/operation gets
+its own TraceMon instance, eliminating race conditions and providing clean,
+per-trace context management.
+
+Key Design Principles:
+1. One TraceMon per trace (complete isolation)
+2. No shared state between traces
+3. Automatic trace_id inference (no parameter needed)
+4. Thread-safe by design
+5. Collector ID and trace_id are the same (one collector per trace)
+"""
+import uuid
+import time
+from datetime import datetime, timezone
+from typing import Dict, Any, Optional
+from contextlib import contextmanager
+from collections import defaultdict
+from ipulse_shared_base_ftredge import (LogLevel, AbstractResource,
+                                        ProgressStatus, Action,
+                                        Alert, StructLog)
+from ipulse_shared_base_ftredge.status import StatusCounts, map_progress_status_to_log_level, eval_statuses
+
+
+class TraceMon:
+    """
+    TraceMon handles monitoring and logging for a single trace/request.
+
+    Each trace gets its own TraceMon instance, providing complete isolation
+    and eliminating race conditions between concurrent requests.
+    """
+
+    def __init__(self, base_context: str, logger,
+                 max_log_field_len: Optional[int] = 8000,  # by default PipelineLog has 8000 per field length Limit
+                 max_log_dict_byte_size: Optional[float] = 256 * 1024 * 0.80,  # by default PipelineLog dict has 256 * 1024 * 0.80 -80% of 256Kb Limit
+                 exclude_none_from_logs: bool = True):
+        """
+        Initialize TraceMon for a single trace.
+
+        Args:
+            base_context: Base context containing all execution details
+            logger: Logger instance to use
+            max_log_field_len: Maximum length for log field values
+            max_log_dict_byte_size: Maximum byte size for log dictionary
+            exclude_none_from_logs: Whether to exclude None values from logs
+        """
+        # Create ID with timestamp prefix and UUID suffix (same pattern as pipelinemon)
+        timestamp = datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')
+        uuid_suffix = str(uuid.uuid4())[:8]  # Take first 8 chars of UUID
+        self._id = f"{timestamp}_{uuid_suffix}"
+
+        # Core configuration
+        self._logger = logger
+        self._base_context = base_context
+
+        # Logging configuration
+        self._max_log_field_len = max_log_field_len
+        self._max_log_dict_byte_size = max_log_dict_byte_size
+        self._exclude_none_from_logs = exclude_none_from_logs
+
+        # Trace timing
+        self._start_time = time.time()
+        self._end_time: Optional[float] = None
+
+        # Context stack (isolated per trace)
+        self._context_stack = []
+
+        # Trace metrics
+        self._metrics = {
+            "trace_id": self._id,  # In TraceMon, collector_id and trace_id are the same
+            "start_time": self._start_time,
+            "status": ProgressStatus.IN_PROGRESS.name,
+            "by_event_count": defaultdict(int),
+            "by_level_code_count": defaultdict(int),
+            "status_counts": StatusCounts()
+        }
+
+        # Add initial status
+        self._metrics["status_counts"].add_status(ProgressStatus.STARTED)
+
+        # Log trace start
+        self._log_trace_start()
+
+    @property
+    def id(self) -> str:
+        """Get the unique ID for this TraceMon (both collector_id and trace_id)."""
+        return self._id
+
+    @property
+    def trace_id(self) -> str:
+        """Get the trace ID for this trace (same as collector_id)."""
+        return self._id
+
+    @property
+    def collector_id(self) -> str:
+        """Get the collector ID for this TraceMon (same as trace_id)."""
+        return self._id
+
+    @property
+    def base_context(self) -> str:
+        """Get the base context."""
+        return self._base_context
+
+    @property
+    def current_context(self) -> str:
+        """Get the current context stack as a string."""
+        if not self._context_stack:
+            return self._id
+        return f"{self._id} >> " + " >> ".join(self._context_stack)
+
+    @property
+    def elapsed_ms(self) -> int:
+        """Get elapsed time in milliseconds since trace start."""
+        return int((time.time() - self._start_time) * 1000)
+
+    @property
+    def duration_ms(self) -> Optional[int]:
+        """Get total duration in milliseconds (only available after end)."""
+        if self._end_time is None:
+            return None
+        return int((self._end_time - self._start_time) * 1000)
+
+    @property
+    def metrics(self) -> Dict[str, Any]:
+        """Get current trace metrics."""
+        metrics = self._metrics.copy()
+        metrics["by_event_count"] = dict(metrics["by_event_count"])
+        metrics["by_level_code_count"] = dict(metrics["by_level_code_count"])
+        metrics["elapsed_ms"] = self.elapsed_ms
+        if self.duration_ms is not None:
+            metrics["duration_ms"] = self.duration_ms
+        return metrics
+
+    @property
+    def is_active(self) -> bool:
+        """Check if trace is still active (not ended)."""
+        return self._end_time is None
+
+    def _log_trace_start(self):
+        """Log the trace start event."""
+        start_log = StructLog(
+            level=LogLevel.INFO,
+            description=f"Starting trace {self._id}",
+            resource=AbstractResource.MICROSERVICE_TRACE,
+            action=Action.EXECUTE,
+            progress_status=ProgressStatus.STARTED,
+        )
+        self.log(start_log)
+
+    @contextmanager
+    def context(self, context_name: str):
+        """
+        Context manager for tracking execution context within this trace.
+
+        Args:
+            context_name: The name of the execution context
+        """
+        self.push_context(context_name)
+        try:
+            yield
+        finally:
+            self.pop_context()
+
+    def push_context(self, context: str):
+        """Add a context level to the stack."""
+        self._context_stack.append(context)
+
+    def pop_context(self):
+        """Remove the most recent context from the stack."""
+        if self._context_stack:
+            return self._context_stack.pop()
+
+    def log(self, log: StructLog) -> None:
+        """
+        Log a StructLog message with this trace's context.
+
+        Args:
+            log: StructLog instance to log
+        """
+        if not self.is_active:
+            # Still allow logging but add a warning note
+            existing_note = log.note or ""
+            warning_note = "TRACE_ENDED"
+            log.note = f"{existing_note}; {warning_note}" if existing_note else warning_note
+
+        # Set trace-specific context
+        log.trace_id = self._id
+        log.collector_id = self._id
+        log.base_context = self._base_context
+        log.context = self.current_context
+
+        # Add elapsed time
+        existing_note = log.note or ""
+        elapsed_note = f"elapsed_ms: {self.elapsed_ms}"
+        log.note = f"{existing_note}; {elapsed_note}" if existing_note else elapsed_note
+
+        # Update trace metrics
+        self._update_counts(log)
+
+        # Write to logger
+        self._write_log_to_logger(log)
+
+    def _update_counts(self, log: StructLog):
+        """Update trace metrics based on the log event."""
+        event_tuple = log.getEvent()
+        level = log.level
+
+        self._metrics["by_event_count"][event_tuple] += 1
+        self._metrics["by_level_code_count"][level.value] += 1
+
+    def _get_error_count(self) -> int:
+        """Get total error count (ERROR + CRITICAL)."""
+        return (self._metrics["by_level_code_count"].get(LogLevel.ERROR.value, 0) +
+                self._metrics["by_level_code_count"].get(LogLevel.CRITICAL.value, 0))
+
+    def _get_warning_count(self) -> int:
+        """Get warning count."""
+        return self._metrics["by_level_code_count"].get(LogLevel.WARNING.value, 0)
+
+    def _get_notice_count(self) -> int:
+        """Get notice count."""
+        return self._metrics["by_level_code_count"].get(LogLevel.NOTICE.value, 0)
+
+    def end(self, force_status: Optional[ProgressStatus] = None, issues_allowed: bool = True) -> Dict[str, Any]:
+        """
+        End this trace and return final metrics.
+
+        Args:
+            force_status: Optional status to force, overriding automatic calculation
+            issues_allowed: Whether issues (errors) are allowed for this trace
+
+        Returns:
+            Dict containing final trace metrics
+        """
+        if not self.is_active:
+            # Already ended, return existing metrics
+            return self.metrics
+
+        self._end_time = time.time()
+
+        # Calculate final status
+        error_count = self._get_error_count()
+        warning_count = self._get_warning_count()
+        notice_count = self._get_notice_count()
+
+        if force_status is not None:
+            final_status = force_status
+            level = map_progress_status_to_log_level(final_status)
+        else:
+            # Simple status calculation based on error/warning/notice counts
+            if error_count > 0:
+                if issues_allowed:
+                    final_status = ProgressStatus.FINISHED_WITH_ISSUES
+                else:
+                    final_status = ProgressStatus.FAILED
+            elif warning_count > 0:
+                final_status = ProgressStatus.DONE_WITH_WARNINGS
+            elif notice_count > 0:
+                final_status = ProgressStatus.DONE_WITH_NOTICES
+            else:
+                final_status = ProgressStatus.DONE
+
+            level = map_progress_status_to_log_level(final_status)
+
+        # Update trace status
+        self._metrics["status"] = final_status.name
+        self._metrics["status_counts"].add_status(final_status)
+
+        # Log completion
+        status_source = "FORCED" if force_status is not None else "AUTO"
+        summary_msg = (
+            f"Trace {self._id} completed with status {final_status.name} ({status_source}). "
+            f"Duration: {self.duration_ms}ms. "
+            f"Errors: {error_count}, Warnings: {warning_count}, Notices: {notice_count}"
+        )
+
+        completion_log = StructLog(
+            level=level,
+            description=summary_msg,
+            resource=AbstractResource.MICROSERVICE_TRACE,
+            action=Action.EXECUTE,
+            progress_status=final_status
+        )
+        self.log(completion_log)
+
+        return self.metrics
+
+    def get_readable_level_counts(self) -> Dict[str, int]:
+        """Get readable level counts for this trace."""
+        readable_counts = {}
+
+        for level_code, count in self._metrics["by_level_code_count"].items():
+            if count > 0:
+                for level in LogLevel:
+                    if level.value == level_code:
+                        readable_counts[level.name] = count
+                        break
+
+        return readable_counts
+
+    def _write_log_to_logger(self, log: StructLog):
+        """Write structured log to the logger."""
+        log_dict = log.to_dict(
+            max_field_len=self._max_log_field_len,
+            byte_size_limit=self._max_log_dict_byte_size,
+            exclude_none=self._exclude_none_from_logs
+        )
+
+        # Write to logger based on level
+        if log.level.value >= LogLevel.ERROR.value:
+            self._logger.error(log_dict)
+        elif log.level.value >= LogLevel.WARNING.value:
+            self._logger.warning(log_dict)
+        elif log.level.value >= LogLevel.INFO.value:
+            self._logger.info(log_dict)
+        else:
+            self._logger.debug(log_dict)
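The new module is self-contained, and its docstring spells out the intended pattern: one TraceMon per request rather than one shared collector. Below is a hedged sketch of that pattern, assuming TraceMon is exported from ipulse_shared_core_ftredge.monitoring (the updated __init__.py is not shown in this diff) and using an illustrative handler name.

# Per-request usage sketch (hypothetical handler; export path assumed).
import logging
from ipulse_shared_base_ftredge import (LogLevel, AbstractResource, Action,
                                        ProgressStatus, StructLog)
from ipulse_shared_core_ftredge.monitoring import TraceMon  # assumed export

def handle_request(payload: dict) -> dict:
    # A fresh TraceMon per request: no shared state, trace_id == collector_id.
    tracemon = TraceMon(base_context="svc=quotes;env=dev",
                        logger=logging.getLogger("quotes"))
    try:
        with tracemon.context("validate_payload"):
            tracemon.log(StructLog(
                level=LogLevel.INFO,
                description=f"Validating payload with {len(payload)} fields",
                resource=AbstractResource.MICROSERVICE_TRACE,
                action=Action.EXECUTE,
                progress_status=ProgressStatus.IN_PROGRESS,
            ))
        return {"trace_id": tracemon.trace_id}
    finally:
        tracemon.end()  # computes the final status from error/warning/notice counts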
{ipulse_shared_core_ftredge-19.0.1.dist-info → ipulse_shared_core_ftredge-20.0.1.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ipulse_shared_core_ftredge
-Version: 19.0.1
+Version: 20.0.1
 Summary: Shared Core models and Logger util for the Pulse platform project. Using AI for financial advisory and investment management.
 Home-page: https://github.com/TheFutureEdge/ipulse_shared_core
 Author: Russlan Ramdowar
{ipulse_shared_core_ftredge-19.0.1.dist-info → ipulse_shared_core_ftredge-20.0.1.dist-info}/RECORD
RENAMED
@@ -16,8 +16,9 @@ ipulse_shared_core_ftredge/models/subscription.py,sha256=bu6BtyDQ4jDkK3PLY97dZ_A
 ipulse_shared_core_ftredge/models/user_auth.py,sha256=NLR3Fazj4ZKy-ym8utnfhwDbxHExsb4DVqFZ2xI4kLI,3140
 ipulse_shared_core_ftredge/models/user_profile.py,sha256=lqpuXfdXFm-yw5xnwk_BC6QLxpBKbxaED0q2_6-GBvw,5765
 ipulse_shared_core_ftredge/models/user_status.py,sha256=7t3aY4-FjC_Jm2NVIlV-88zcdOG9Bd_g3c5Vpt-amSw,21077
-ipulse_shared_core_ftredge/monitoring/__init__.py,sha256=
-ipulse_shared_core_ftredge/monitoring/microservmon.py,sha256=
+ipulse_shared_core_ftredge/monitoring/__init__.py,sha256=lH1zGVWMuWkqh6mmBWq0GU2pdbg7lfdBXHckGlf768k,120
+ipulse_shared_core_ftredge/monitoring/microservmon.py,sha256=hg713JDW-QGXvBlHIP--nBKF94XcWzcHCOjmkr87BrA,21031
+ipulse_shared_core_ftredge/monitoring/tracemon.py,sha256=92a6hWhMB1nAqfKG0pCW3sBqfEHLNitInNI9O-zyv-8,11616
 ipulse_shared_core_ftredge/services/__init__.py,sha256=HbPhpzeJjf0_B7IjfbqA1DcRb2b3ZEvdL_axx173Yh4,894
 ipulse_shared_core_ftredge/services/cache_aware_firestore_service.py,sha256=QCDU8Xy3ztEMy9wLzoZmszQp6J7m5wEcNki2JPDWqyI,9140
 ipulse_shared_core_ftredge/services/charging_processors.py,sha256=8bozatlie8egZFA-IUc2Vh1zjhyTdDqoe5nNgsL_ebM,16170
@@ -34,8 +35,8 @@ ipulse_shared_core_ftredge/services/user/user_holistic_operations.py,sha256=oujC
 ipulse_shared_core_ftredge/utils/__init__.py,sha256=JnxUb8I2MRjJC7rBPXSrpwBIQDEOku5O9JsiTi3oun8,56
 ipulse_shared_core_ftredge/utils/custom_json_encoder.py,sha256=DblQLD0KOSNDyQ58wQRogBrShIXzPIZUw_oGOBATnJY,1366
 ipulse_shared_core_ftredge/utils/json_encoder.py,sha256=QkcaFneVv3-q-s__Dz4OiUWYnM6jgHDJrDMdPv09RCA,2093
-ipulse_shared_core_ftredge-
-ipulse_shared_core_ftredge-
-ipulse_shared_core_ftredge-
-ipulse_shared_core_ftredge-
-ipulse_shared_core_ftredge-
+ipulse_shared_core_ftredge-20.0.1.dist-info/licenses/LICENCE,sha256=YBtYAXNqCCOo9Mr2hfkbSPAM9CeAr2j1VZBSwQTrNwE,1060
+ipulse_shared_core_ftredge-20.0.1.dist-info/METADATA,sha256=Yw11hvsKpLro4hxINzgUgcqzuBIwM-pThiAzkGsarbU,803
+ipulse_shared_core_ftredge-20.0.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ipulse_shared_core_ftredge-20.0.1.dist-info/top_level.txt,sha256=8sgYrptpexkA_6_HyGvho26cVFH9kmtGvaK8tHbsGHk,27
+ipulse_shared_core_ftredge-20.0.1.dist-info/RECORD,,
{ipulse_shared_core_ftredge-19.0.1.dist-info → ipulse_shared_core_ftredge-20.0.1.dist-info}/WHEEL
RENAMED
File without changes

{ipulse_shared_core_ftredge-19.0.1.dist-info → ipulse_shared_core_ftredge-20.0.1.dist-info}/licenses/LICENCE
RENAMED
File without changes

{ipulse_shared_core_ftredge-19.0.1.dist-info → ipulse_shared_core_ftredge-20.0.1.dist-info}/top_level.txt
RENAMED
File without changes