ipulse-shared-core-ftredge 2.53-py3-none-any.whl → 2.55-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ipulse-shared-core-ftredge might be problematic.
- ipulse_shared_core_ftredge/__init__.py +2 -2
- ipulse_shared_core_ftredge/enums/__init__.py +1 -1
- ipulse_shared_core_ftredge/enums/enums_common_utils.py +11 -11
- ipulse_shared_core_ftredge/enums/enums_data_eng.py +4 -3
- ipulse_shared_core_ftredge/enums/enums_module_fincore.py +15 -15
- ipulse_shared_core_ftredge/utils_common.py +135 -310
- ipulse_shared_core_ftredge/utils_templates_and_schemas.py +12 -12
- {ipulse_shared_core_ftredge-2.53.dist-info → ipulse_shared_core_ftredge-2.55.dist-info}/METADATA +1 -1
- {ipulse_shared_core_ftredge-2.53.dist-info → ipulse_shared_core_ftredge-2.55.dist-info}/RECORD +12 -12
- {ipulse_shared_core_ftredge-2.53.dist-info → ipulse_shared_core_ftredge-2.55.dist-info}/WHEEL +1 -1
- {ipulse_shared_core_ftredge-2.53.dist-info → ipulse_shared_core_ftredge-2.55.dist-info}/LICENCE +0 -0
- {ipulse_shared_core_ftredge-2.53.dist-info → ipulse_shared_core_ftredge-2.55.dist-info}/top_level.txt +0 -0
ipulse_shared_core_ftredge/__init__.py
@@ -5,9 +5,9 @@ from .utils_gcp import (setup_gcp_logger_and_error_report,
                         write_csv_to_gcs, write_json_to_gcs)
 from .utils_templates_and_schemas import (create_bigquery_schema_from_json,
                                           check_format_against_schema_template)
-from .utils_common import (
+from .utils_common import (ContextLog, Pipelinemon)
 
-from .enums import (
+from .enums import (TargetLogs, LogLevel, Unit, Frequency,
                     Module, SubModule, BaseDataCategory,
                     FinCoreCategory, FincCoreSubCategory,
                     FinCoreRecordsCategory, ExchangeOrPublisher,
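
The old import targets are truncated by the diff viewer, but the + lines show the 2.55 surface: the renamed logging classes and the logging enums are re-exported from the package root. A minimal import sketch, using only names visible in the + lines above:

    from ipulse_shared_core_ftredge import ContextLog, Pipelinemon, TargetLogs, LogLevel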
ipulse_shared_core_ftredge/enums/enums_common_utils.py
@@ -6,14 +6,14 @@
 from enum import Enum
 
 
-class
-    MIXED="
-    SUCCESSES = "
-    NOTICES = "
-    SUCCESSES_AND_NOTICES = "
-    WARNINGS = "
-    WARNINGS_AND_ERRORS = "
-    ERRORS = "
+class TargetLogs(Enum):
+    MIXED="mixed_logs"
+    SUCCESSES = "success_logs"
+    NOTICES = "notice_logs"
+    SUCCESSES_AND_NOTICES = "succs_n_notc_logs"
+    WARNINGS = "warning_logs"
+    WARNINGS_AND_ERRORS = "warn_n_err_logs"
+    ERRORS = "error_logs"
 
 
 class LogLevel(Enum):

@@ -98,17 +98,17 @@ class Unit(Enum):
     CURRENCY = "currency" # General currency, when specific currency is not needed
 
     # Stock Market and Investments
-    SHARES = "
+    SHARES = "shares" # Number of shares
     PERCENT = "prcnt" # Percentage, used for rates and ratios
     BPS = "bps" # Basis points, often used for interest rates and financial ratios
 
     # Volume and Quantitative Measurements
-    VOLUME = "
+    VOLUME = "volume" # Trading volume in units
     MILLIONS = "mills" # Millions, used for large quantities or sums
     BILLIONS = "bills" # Billions, used for very large quantities or sums
 
     # Commodity Specific Units
-    BARRELS = "
+    BARRELS = "barrels" # Barrels, specifically for oil and similar liquids
     TONNES = "tonnes" # Tonnes, for bulk materials like metals or grains
     TROY_OUNCES = "troy_oz" # Troy ounces, specifically for precious metals
 
ipulse_shared_core_ftredge/enums/enums_data_eng.py
@@ -4,11 +4,12 @@
 from enum import Enum
 
 class SourcingTriggerType(Enum):
-
-
+    BULK_MANUAL = "bulk_manual"
+    BULK_SCHEDULED = "bulk_scheduled" # almost always historic bulk is manual
+    RECENT_SCHEDULED = "recent_scheduled"
+    RECENT_MANUAL = "recent_manual"
     ADHOC_MANUAL = "adhoc_manual"
     ADHOC_SCHEDULED = "adhoc_scheduled"
-    LIVE_MANUAL = "live_manual"
 
 class SourcingPipelineType(Enum):
     LOCAL_GET_API_TO_GCS = "local_get_api_to_gcs"
ipulse_shared_core_ftredge/enums/enums_module_fincore.py
@@ -5,14 +5,14 @@ from enum import Enum
 
 
 class FinCoreCategory(Enum):
-    MARKET="market"
-
-    POLITICS="poltcs"
-    CORPORATE="corp"
+    MARKET="market" # Market prices data
+    CORPORATE="corp" # Corporate data such as financial statements and earnings, similar to fundamental data
     FUNDAMENTAL="fundam"
-
+    ECONOMY="economy"
     NEWS="news"
+    SENTIMENT="sntmnt"
     SOCIAL="social"
+    POLITICS="poltcs"
     OTHER="other"
 
 class FincCoreSubCategory(Enum):

@@ -32,16 +32,16 @@ class FincCoreSubCategory(Enum):
 
 class FinCoreRecordsCategory(Enum):
     PRICE="pric"
-
-
-
-
-
-
-
-
-
-    FUNDAMENTAL="fundam" # treat this differently
+    SPOT= "spot"
+    OHLCVA="ohlcva"
+    OHLCV="ohlcv"
+    OPEN="open"
+    HIGH="high"
+    LOW="low"
+    CLOSE="close"
+    VOLUME="volume"
+    ADJC="adjc"
+    FUNDAMENTAL="fundam" # treat this differently
     EARNINGS="earnings"
     CASH_FLOW="cashflw"
     BALANCE_SHEET="blnce_sht"
ipulse_shared_core_ftredge/utils_common.py
@@ -10,18 +10,19 @@ from datetime import datetime, timezone
 from contextlib import contextmanager
 from typing import List
 from google.cloud import logging as cloudlogging
-from ipulse_shared_core_ftredge.enums.enums_common_utils import
+from ipulse_shared_core_ftredge.enums.enums_common_utils import TargetLogs, LogLevel, LogStatus
 from ipulse_shared_core_ftredge.utils_gcp import write_json_to_gcs
 
 
 # ["data_import","data_quality", "data_processing","data_general","data_persistance","metadata_quality", "metadata_processing", "metadata_persistance","metadata_general"]
 
-class
-    MAX_TRACEBACK_LINES =
-    def __init__(self, level: LogLevel,
-
-
-
+class ContextLog:
+    MAX_TRACEBACK_LINES = 24 # Define the maximum number of traceback lines to include
+    def __init__(self, level: LogLevel, base_context: str = None, collector_id: str = None,
+                 context: str = None, description: str = None,
+                 e: Exception = None, e_type: str = None, e_message: str = None, e_traceback: str = None,
+                 log_status: LogStatus = LogStatus.OPEN, subject: str = None
+                 ):
         if e is not None:
             e_type = type(e).__name__ if e_type is None else e_type
             e_message = str(e) if e_message is None else e_message
@@ -32,7 +33,7 @@ class SmartLog:
         self.level = level
         self.subject = subject
         self.description = description
-        self.
+        self._base_context = base_context
         self._context = context
         self.collector_id = collector_id
         self.exception_type = e_type

@@ -46,7 +47,7 @@ class SmartLog:
             return None
 
         traceback_lines = e_traceback.splitlines()
-
+
         # Remove lines that are part of the exception message if they are present in traceback
         message_lines = e_message.splitlines() if e_message else []
         if message_lines:

@@ -56,7 +57,7 @@ class SmartLog:
 
         # Filter out lines from third-party libraries (like site-packages)
         filtered_lines = [line for line in traceback_lines if "site-packages" not in line]
-
+
         # If filtering results in too few lines, revert to original traceback
         if len(filtered_lines) < 2:
             filtered_lines = traceback_lines

@@ -86,12 +87,12 @@ class SmartLog:
         return formatted_traceback
 
     @property
-    def
-        return self.
+    def base_context(self):
+        return self._base_context
 
-    @
-    def
-        self.
+    @base_context.setter
+    def base_context(self, value):
+        self._base_context = value
 
     @property
     def context(self):
@@ -101,29 +102,114 @@ class SmartLog:
     def context(self, value):
         self._context = value
 
-
-
-
-
-
-
-        "
-        "
-        "
-        "
-        "
-        "
-
-
-
-
-
+
+    def to_dict(self, size_limit=256 * 1024 * 0.80):
+        size_limit = int(size_limit) # Ensure size_limit is an integer
+
+        # Define the priority order of the fields
+        priority_fields = [
+            ("base_context", self.base_context),
+            ("level_code", self.level.value),
+            ("level_name", self.level.name),
+            ("log_status", self.log_status.value),
+            ("collector_id", self.collector_id),
+            ("timestamp", self.timestamp),
+        ]
+
+        # Additional fields to be truncated if necessary. Shorter fields are truncated first so that remaining size can increase for longer fields.
+        additional_fields = [
+            ("subject", self.subject),
+            ("description", self.description),
+            ("exception_type", self.exception_type),
+            ("exception_message", self.exception_message),
+            ("context", self.context), # special sizing rules apply to it
+            ("exception_traceback", self.exception_traceback)
+        ]
+
+        all_fields = priority_fields + additional_fields
+        non_zero_fields = [(key, value) for key, value in all_fields if value is not None]
+
+        total_size = 0
+        truncated = False # Flag to indicate if truncation happened
+
+        # Function to calculate the byte size of a JSON-encoded field
+        def field_size(key, value):
+            return len(json.dumps({key: value}).encode('utf-8'))
+
+        # Function to truncate a value based on its type
+        def truncate_value(value, max_size):
+            if isinstance(value, str):
+                half_size = max_size // 2
+                return value[:half_size] + '...' + value[-(max_size - half_size - 3):]
+            elif isinstance(value, (list, tuple)):
+                half_size = max_size // 2
+                return list(value[:half_size]) + ['...'] + list(value[-(max_size - half_size - 1):])
+            elif isinstance(value, set):
+                truncated_set = set(list(value)[:max_size // 2]) | set(list(value)[-(max_size // 2):])
+                return truncated_set
+            elif isinstance(value, dict):
+                truncated_dict = {k: truncate_value(v, max_size // len(value)) for k, v in list(value.items())}
+                return truncated_dict
+            else:
+                return value
+
+        # Calculate the initial total size
+        for key, value in non_zero_fields:
+            total_size += field_size(key, value)
+
+        log_dict = {}
+        # Check if total size exceeds the size limit
+        if total_size > size_limit:
+            truncated = True # Set the truncation flag
+            # Calculate max size per field based on all non-zero fields
+            max_size_per_field = size_limit // len(non_zero_fields)
+
+            # Reset total_size to recompute with truncation
+            total_size = 0
+
+            # Add priority fields first with possible truncation
+            for key, value in priority_fields:
+                if value is not None:
+                    truncated_value = value
+                    if isinstance(value, (str, list, tuple, set, dict)) and field_size(key, value) > max_size_per_field:
+                        truncated_value = truncate_value(value, max_size_per_field)
+                    log_dict[key] = truncated_value
+                    total_size += field_size(key, truncated_value)
+                else:
+                    log_dict[key] = value
+
+            # Calculate remaining size for additional fields
+            remaining_size = size_limit - total_size
+
+            # Handle remaining additional fields
+            non_zero_additional_fields = [field for field in additional_fields[1:] if field[1]]
+            remaining_field_size = remaining_size // len(non_zero_additional_fields) if non_zero_additional_fields else 0
+
+            for key, value in additional_fields[1:]:
+                if value is not None:
+                    if field_size(key, value) > remaining_field_size:
+                        truncated_value = truncate_value(value, remaining_field_size)
+                    else:
+                        truncated_value = value
+                    log_dict[key] = truncated_value
+                    remaining_size -= field_size(key, truncated_value)
+                else:
+                    log_dict[key] = value
+        else:
+            log_dict = dict(all_fields)
+
+        # Add trunc flag to the log dictionary
+        log_dict['trunc'] = truncated
+
+        return log_dict
+
+class Pipelinemon:
     ERROR_START_CODE = LogLevel.ERROR.value
     WARNING_START_CODE = LogLevel.WARNING.value
     NOTICE_START_CODE = LogLevel.NOTICE.value
     SUCCESS_START_CODE = LogLevel.SUCCESS.value
 
-    def __init__(self,
+    def __init__(self, base_context: str, target_logs: TargetLogs = TargetLogs.MIXED, logger_name=None):
         self._id = str(uuid.uuid4())
         self._logs = []
         self._early_stop = False
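
The new ContextLog.to_dict() above budgets each serialized log to int(256 * 1024 * 0.80) bytes, which looks sized to stay under Google Cloud Logging's 256 KB per-entry limit (an inference; the code does not say so). Priority fields are written first; the remaining fields are middle-truncated to fit, and a trunc flag records whether truncation happened. A hypothetical usage sketch, assuming only the constructor and enum members visible in this diff:

    from ipulse_shared_core_ftredge import ContextLog, LogLevel

    # ~300 KB description, above the ~200 KiB default budget
    log = ContextLog(level=LogLevel.WARNING,
                     base_context="demo_pipeline",
                     subject="oversized_field",
                     description="x" * 300_000)

    d = log.to_dict()
    print(d["trunc"])             # True: the truncation flag added in 2.55
    print(d["description"][:40])  # head kept; the middle is replaced by '...'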
@@ -132,9 +218,9 @@ class Watcher:
         self._notices_count = 0
         self._successes_count = 0
         self._level_counts = {level.name: 0 for level in LogLevel}
-        self.
+        self._base_context = base_context
         self._context_stack = []
-        self.
+        self._target_logs = target_logs.value
         self._logger = self._initialize_logger(logger_name)
 
     def _initialize_logger(self, logger_name):

@@ -163,8 +249,8 @@ class Watcher:
         return " >> ".join(self._context_stack)
 
     @property
-    def
-        return self.
+    def base_context(self):
+        return self._base_context
 
     @property
     def id(self):

@@ -179,7 +265,7 @@ class Watcher:
         if create_error_log:
             if pop_context:
                 self.pop_context()
-            self.add_log(
+            self.add_log(ContextLog(level=LogLevel.ERROR,
                                     subject="EARLY_STOP",
                                     description=f"Total MAX_ERRORS_TOLERANCE of {max_errors_tolerance} has been reached."))

@@ -189,11 +275,11 @@ class Watcher:
     def get_early_stop(self):
         return self._early_stop
 
-    def add_log(self, log:
-        if (self.
-            (self.
-            raise ValueError(f"Invalid log level {log.level.name} for
-        log.
+    def add_log(self, log: ContextLog):
+        if (self._target_logs == TargetLogs.SUCCESSES and log.level >=self.NOTICE_START_CODE) or \
+           (self._target_logs == TargetLogs.WARNINGS_AND_ERRORS and log.level.value < self.WARNING_START_CODE):
+            raise ValueError(f"Invalid log level {log.level.name} for Pipelinemon target logs setup: {self._target_logs}")
+        log.base_context = self.base_context
         log.context = self.current_context
         log.collector_id = self.id
         log_dict = log.to_dict()

@@ -201,7 +287,7 @@ class Watcher:
         self._update_counts(log_dict)
 
         if self._logger:
-            # We specifically want to avoid having an ERROR log level for this structured
+            # We specifically want to avoid having an ERROR log level for this structured Pipelinemon reporting, to ensure Errors are alerting on Critical Application Services.
             # A single ERROR log level can be used for the entire pipeline, which shall be used at the end of the pipeline
             if log.level.value >= self.WARNING_START_CODE:
                 self._logger.log_struct(log_dict, severity="WARNING")

@@ -210,7 +296,7 @@ class Watcher:
             else:
                 self._logger.log_struct(log_dict, severity="INFO")
 
-    def add_logs(self, logs: List[
+    def add_logs(self, logs: List[ContextLog]):
         for log in logs:
             self.add_log(log)
 
@@ -236,7 +322,7 @@ class Watcher:
             log for log in self._logs
             if context_substring in log["context"]
         ]
-
+
     def contains_errors(self):
         return self._errors_count > 0
 

@@ -248,7 +334,7 @@ class Watcher:
 
     def count_warnings_and_errors(self):
         return self._warnings_count + self._errors_count
-
+
     def count_warnings(self):
         return self._warnings_count
 

@@ -324,7 +410,7 @@ class Watcher:
             logger.error(message, exc_info=exc_info)
 
         if not file_prefix:
-            file_prefix = self.
+            file_prefix = self._target_logs
         if not file_name:
             timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
             if top_level_context:

@@ -393,265 +479,4 @@ class Watcher:
             self._notices_count += 1
         elif self.SUCCESS_START_CODE <= level_code < self.NOTICE_START_CODE:
             self._successes_count += 1
-        self._level_counts[level_name] += 1
-
-
-# class Watcher:
-#     ERROR_START_CODE = WatcherLogLevel.ERROR.value
-#     WARNING_START_CODE = WatcherLogLevel.WARNING.value
-#     NOTICE_START_CODE = WatcherLogLevel.NOTICE.value
-#     SUCCESS_START_CODE = WatcherLogLevel.SUCCESS.value
-
-#     def __init__(self, start_context: str, category: WatcherCategory = WatcherCategory.MIXED, logger_name=None):
-#         self._id = str(uuid.uuid4())
-#         self._logs = []
-#         self._early_stop = False
-#         self._errors_count = 0
-#         self._warnings_count = 0
-#         self._successes_count = 0
-#         self._level_counts = {level.name: 0 for level in WatcherLogLevel}
-#         self._start_context = start_context
-#         self._context_stack = []
-#         self._category = category.value
-#         self._logger = self._initialize_logger(logger_name)
-
-#     def _initialize_logger(self, logger_name):
-#         if logger_name:
-#             logging_client = cloudlogging.Client()
-#             return logging_client.logger(logger_name)
-#         return None
-
-
-#     @contextmanager
-#     def context(self, context):
-#         self.push_context(context)
-#         try:
-#             yield
-#         finally:
-#             self.pop_context()
-
-#     def push_context(self, context):
-#         self._context_stack.append(context)
-
-#     def pop_context(self):
-#         if self._context_stack:
-#             self._context_stack.pop()
-
-#     @property
-#     def current_context(self):
-#         return " >> ".join(self._context_stack)
-
-#     @property
-#     def start_context(self):
-#         return self._start_context
-
-#     @property
-#     def id(self):
-#         return self._id
-
-#     @property
-#     def early_stop(self):
-#         return self._early_stop
-
-#     def set_early_stop(self, max_errors_tolerance:int, create_error_notice=True,pop_context=False):
-#         self.early_stop = True
-#         if create_error_notice:
-#             if pop_context:
-#                 self.pop_context()
-#             self.add_notice(WatcherLog(level=WatcherLogLevel.ERROR,
-#                                        subject="EARLY_STOP",
-#                                        description=f"Total MAX_ERRORS_TOLERANCE of {max_errors_tolerance} has been reached."))
-
-#     def reset_early_stop(self):
-#         self._early_stop = False
-
-#     def get_early_stop(self):
-#         return self._early_stop
-
-#     def add_notice(self, notice: WatcherLog):
-#         if (self._category == WatcherCategory.SUCCESSES.value and notice.level != WatcherLogLevel.SUCCESS) or \
-#            (self._category == WatcherCategory.WARNINGS_AND_ERRORS.value and notice.level.value < self.WARNING_START_CODE):
-#             raise ValueError(f"Invalid notice level {notice.level.name} for category {self._category}")
-#         notice.start_context = self.start_context
-#         notice.context = self.current_context
-#         notice.watcher_id = self.id
-#         notice_dict = notice.to_dict()
-#         self._logs.append(notice_dict)
-#         self._update_counts(notice_dict)
-
-#         if self._logger:
-#             if notice.level.value >= self.WARNING_START_CODE:
-#                 self._logger.log_struct(notice_dict, severity="WARNING")
-#             else:
-#                 self._logger.log_struct(notice_dict, severity="INFO")
-
-#     def add_notices(self, notices: List[WatcherLog]):
-#         for notice in notices:
-#             self.add_notice(notice)
-
-#     def clear_notices_and_counts(self):
-#         self._logs = []
-#         self._errors_count = 0
-#         self._warnings_count = 0
-#         self._successes_count = 0
-#         self._level_counts = {level.name: 0 for level in WatcherLogLevel}
-
-#     def clear_notices(self):
-#         self._logs = []
-
-#     def get_all_notices(self):
-#         return self._logs
-
-#     def get_notices_for_level(self, level: WatcherLogLevel):
-#         return [notice for notice in self._logs if notice["level_code"] == level.value]
-
-#     def get_notices_by_str_in_context(self, context_substring: str):
-#         return [
-#             notice for notice in self._logs
-#             if context_substring in notice["context"]
-#         ]
-
-#     def contains_errors(self):
-#         return self._errors_count > 0
-
-#     def count_errors(self):
-#         return self._errors_count
-
-#     def contains_warnings_or_errors(self):
-#         return self._warnings_count > 0 or self._errors_count > 0
-
-#     def count_warnings_and_errors(self):
-#         return self._warnings_count + self._errors_count
-
-#     def count_warnings(self):
-#         return self._warnings_count
-
-#     def count_successes(self):
-#         return self._successes_count
-
-#     def count_all_notices(self):
-#         return len(self._logs)
-
-#     def count_notices_by_level(self, level: WatcherLogLevel):
-#         return self._level_counts.get(level.name, 0)
-
-#     def _count_notices(self, context_substring: str, exact_match=False, level_code_min=None, level_code_max=None):
-#         return sum(
-#             1 for notice in self._logs
-#             if (notice["context"] == context_substring if exact_match else context_substring in notice["context"]) and
-#             (level_code_min is None or notice["level_code"] >= level_code_min) and
-#             (level_code_max is None or notice["level_code"] <= level_code_max)
-#         )
-
-#     def count_notices_for_current_context(self):
-#         return self._count_notices(self.current_context, exact_match=True)
-
-#     def count_notices_for_current_and_nested_contexts(self):
-#         return self._count_notices(self.current_context)
-
-#     def count_notices_by_level_for_current_context(self, level: WatcherLogLevel):
-#         return self._count_notices(self.current_context, exact_match=True, level_code_min=level.value, level_code_max=level.value)
-
-#     def count_notices_by_level_for_current_and_nested_contexts(self, level: WatcherLogLevel):
-#         return self._count_notices(self.current_context, level_code_min=level.value, level_code_max=level.value)
-
-#     def count_errors_for_current_context(self):
-#         return self._count_notices(self.current_context, exact_match=True, level_code_min=self.ERROR_START_CODE)
-
-#     def count_errors_for_current_and_nested_contexts(self):
-#         return self._count_notices(self.current_context, level_code_min=self.ERROR_START_CODE)
-
-#     def count_warnings_and_errors_for_current_context(self):
-#         return self._count_notices(self.current_context, exact_match=True, level_code_min=self.WARNING_START_CODE)
-
-#     def count_warnings_and_errors_for_current_and_nested_contexts(self):
-#         return self._count_notices(self.current_context, level_code_min=self.WARNING_START_CODE)
-
-#     def count_warnings_for_current_context(self):
-#         return self._count_notices(self.current_context, exact_match=True, level_code_min=self.WARNING_START_CODE, level_code_max=self.ERROR_START_CODE - 1)
-
-#     def count_warnings_for_current_and_nested_contexts(self):
-#         return self._count_notices(self.current_context, level_code_min=self.WARNING_START_CODE, level_code_max=self.ERROR_START_CODE - 1)
-
-#     def count_successes_for_current_context(self):
-#         return self._count_notices(self.current_context, exact_match=True, level_code_min=self.SUCCESS_START_CODE, level_code_max=self.SUCCESS_START_CODE)
-
-#     def count_successes_for_current_and_nested_contexts(self):
-#         return self._count_notices(self.current_context, level_code_min=self.SUCCESS_START_CODE, level_code_max=self.SUCCESS_START_CODE)
-
-#     def export_notices_to_gcs_file(self, bucket_name, storage_client, file_prefix=None, file_name=None, top_level_context=None, save_locally=False, local_path=None, logger=None, max_retries=2):
-#         def log_message(message):
-#             if logger:
-#                 logger.info(message)
-
-#         def log_error(message, exc_info=False):
-#             if logger:
-#                 logger.error(message, exc_info=exc_info)
-
-#         if not file_prefix:
-#             file_prefix = self._category
-#         if not file_name:
-#             timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
-#             if top_level_context:
-#                 file_name = f"{file_prefix}_{timestamp}_{top_level_context}_len{len(self._logs)}.json"
-#             else:
-#                 file_name = f"{file_prefix}_{timestamp}_len{len(self._logs)}.json"
-
-#         result=None
-#         try:
-#             result= write_json_to_gcs(
-#                 bucket_name=bucket_name,
-#                 storage_client=storage_client,
-#                 data=self._logs,
-#                 file_name=file_name,
-#                 save_locally=save_locally,
-#                 local_path=local_path,
-#                 logger=logger,
-#                 max_retries=max_retries,
-#                 overwrite_gcs=False
-#             )
-#             log_message(f"{file_prefix} successfully saved (ovewritten={result.get("gcs_file_overwritten")}) to GCS at {result.get("gcs_path")} and locally at {result.get("local_path")}.")
-#         except Exception as e:
-#             log_error(f"Failed at export_notices_to_gcs_file for {file_prefix} for file {file_name} to bucket {bucket_name}: {type(e).__name__} - {str(e)}")
-
-#         return result
-
-#     def import_notices_from_json(self, json_or_file, logger=None):
-#         def log_message(message):
-#             if logger:
-#                 logger.info(message)
-
-#         def log_warning(message, exc_info=False):
-#             if logger:
-#                 logger.warning(message, exc_info=exc_info)
-
-#         try:
-#             if isinstance(json_or_file, str):  # Load from string
-#                 imported_notices = json.loads(json_or_file)
-#             elif hasattr(json_or_file, 'read'):  # Load from file-like object
-#                 imported_notices = json.load(json_or_file)
-#             self.add_notices(imported_notices)
-#             log_message("Successfully imported notices from json.")
-#         except Exception as e:
-#             log_warning(f"Failed to import notices from json: {type(e).__name__} - {str(e)}", exc_info=True)
-
-#     def _update_counts(self, notice, remove=False):
-#         level_code = notice["level_code"]
-#         level_name = notice["level_name"]
-
-#         if remove:
-#             if level_code >= self.ERROR_START_CODE:
-#                 self._errors_count -= 1
-#             elif level_code >= self.WARNING_START_CODE:
-#                 self._warnings_count -= 1
-#             elif level_code >= self.SUCCESS_START_CODE:
-#                 self._successes_count -= 1
-#             self._level_counts[level_name] -= 1
-#         else:
-#             if level_code >= self.ERROR_START_CODE:
-#                 self._errors_count += 1
-#             elif level_code >= self.WARNING_START_CODE:
-#                 self._warnings_count += 1
-#             elif level_code == self.SUCCESS_START_CODE:
-#                 self._successes_count += 1
-#             self._level_counts[level_name] += 1
+        self._level_counts[level_name] += 1
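
Read together, the utils_common.py hunks are a rename pass (Watcher → Pipelinemon, WatcherLog/SmartLog → ContextLog, category → target_logs, start_context → base_context) plus the new to_dict() size budgeting; the deleted tail is the old implementation that had been kept as a comment block. A hypothetical end-to-end sketch of the renamed API: the context() context manager and the count_* helpers appear here only as unchanged or commented-out lines, so their survival in 2.55 is assumed, and logger_name is omitted so no Cloud Logging client is created:

    from ipulse_shared_core_ftredge import ContextLog, Pipelinemon, TargetLogs, LogLevel

    pipelinemon = Pipelinemon(base_context="daily_market_import",
                              target_logs=TargetLogs.MIXED)

    with pipelinemon.context("fetch_prices"):  # pushes onto the context stack
        pipelinemon.add_log(ContextLog(level=LogLevel.WARNING,
                                       subject="AAPL",
                                       description="Missing volume for one trading day."))

    print(pipelinemon.count_warnings())   # 1
    print(pipelinemon.contains_errors())  # False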
ipulse_shared_core_ftredge/utils_templates_and_schemas.py
@@ -6,7 +6,7 @@
 import datetime
 from google.cloud import bigquery
 from ipulse_shared_core_ftredge.enums.enums_common_utils import LogLevel
-from ipulse_shared_core_ftredge.utils_common import
+from ipulse_shared_core_ftredge.utils_common import ContextLog
 
 
 def create_bigquery_schema_from_json(json_schema):

@@ -59,13 +59,13 @@ def check_format_against_schema_template(data_to_check, schema, dt_ts_to_str=Tru
             checked_data[field_name] = value
 
         elif mode == "REQUIRED":
-            warning =
+            warning = ContextLog(level=LogLevel.WARNING,
                                  subject=field_name,
                                  description=f"Required field '{field_name}' is missing in the updates.")
             warnings_or_error.append(warning)
 
     except Exception as e:
-        error_log =
+        error_log = ContextLog(level=LogLevel.ERROR_EXCEPTION_REDO,
                                subject=data_to_check,
                                description=f"An error occurred during update check: {str(e)}")
         warnings_or_error.append(error_log)

@@ -85,11 +85,11 @@ def handle_date_fields(field_name, value, dt_ts_to_str):
             return value, None
         return parsed_date, None
     except ValueError:
-        return None,
+        return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
                                 subject=field_name,
                                 description=f"Expected a DATE in YYYY-MM-DD format but got {value}.")
     else:
-        return None,
+        return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
                                 subject=field_name,
                                 description= f"Expected a DATE or YYYY-MM-DD str format but got {value} of type {type(value).__name__}.")
 

@@ -107,11 +107,11 @@ def handle_timestamp_fields(field_name, value, dt_ts_to_str):
             return value, None
         return parsed_datetime, None
     except ValueError:
-        return None,
+        return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
                                 subject=field_name,
                                 description= f"Expected ISO format TIMESTAMP but got {value}.")
     else:
-        return None,
+        return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
                                 subject=field_name,
                                 description= f"Expected ISO format TIMESTAMP but got {value} of type {type(value).__name__}.")
 

@@ -119,7 +119,7 @@ def handle_timestamp_fields(field_name, value, dt_ts_to_str):
 def check_and_truncate_length(field_name, value, max_length):
     """Checks and truncates the length of string fields if they exceed the max length."""
     if isinstance(value, str) and len(value) > max_length:
-        return value[:max_length],
+        return value[:max_length], ContextLog(level=LogLevel.WARNING_FIX_RECOMMENDED,
                                               subject= field_name,
                                               description= f"Field exceeds max length: {len(value)}/{max_length}. Truncating.")
 

@@ -129,7 +129,7 @@ def check_and_truncate_length(field_name, value, max_length):
 
 def handle_type_conversion(field_type, field_name, value):
     if field_type == "STRING" and not isinstance(value, str):
-        return str(value),
+        return str(value), ContextLog(level=LogLevel.WARNING_REVIEW_RECOMMENDED,
                                       subject=field_name,
                                       description= f"Expected STRING but got {value} of type {type(value).__name__}.")
 

@@ -137,18 +137,18 @@ def handle_type_conversion(field_type, field_name, value):
     try:
         return int(value), None
     except ValueError:
-        return None,
+        return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
                                 subject= field_name,
                                 description=f"Expected INTEGER, but got {value} of type {type(value).__name__}.")
     if field_type == "FLOAT64" and not isinstance(value, float):
         try:
             return float(value), None
         except ValueError:
-            return None,
+            return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
                                     subject=field_name,
                                     description=f"Expected FLOAT, but got {value} of type {type(value).__name__}.")
     if field_type == "BOOL" and not isinstance(value, bool):
-        return bool(value),
+        return bool(value), ContextLog(level=LogLevel.WARNING_REVIEW_RECOMMENDED,
                                        subject=field_name,
                                        description=f"Expected BOOL, but got {value}. Converting as {bool(value)}.")
 
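
The schema helpers keep their (value, issue) return shape; the issue side is now a ContextLog (the 2.53 right-hand sides are truncated in this diff, so the old type is not visible). A hypothetical call against the STRING branch shown above:

    from ipulse_shared_core_ftredge.utils_templates_and_schemas import handle_type_conversion

    value, issue = handle_type_conversion("STRING", "ticker", 12345)
    # value == "12345"; issue is a ContextLog at WARNING_REVIEW_RECOMMENDED
    if issue is not None:
        print(issue.subject, issue.description)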
{ipulse_shared_core_ftredge-2.53.dist-info → ipulse_shared_core_ftredge-2.55.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ipulse_shared_core_ftredge
-Version: 2.53
+Version: 2.55
 Summary: Shared Core models and Logger util for the Pulse platform project. Using AI for financial advisory and investment management.
 Home-page: https://github.com/TheFutureEdge/ipulse_shared_core
 Author: Russlan Ramdowar
{ipulse_shared_core_ftredge-2.53.dist-info → ipulse_shared_core_ftredge-2.55.dist-info}/RECORD
RENAMED
@@ -1,11 +1,11 @@
-ipulse_shared_core_ftredge/__init__.py,sha256=
-ipulse_shared_core_ftredge/utils_common.py,sha256=
+ipulse_shared_core_ftredge/__init__.py,sha256=gIwh_6xjoG7UIuTKNkq0iGvCuxsbyXNrTyV3vB8YdGc,869
+ipulse_shared_core_ftredge/utils_common.py,sha256=MzOoeCZRibVE8yZd5DLMaEuKa-ed5bb4ihob6wHpXRs,20351
 ipulse_shared_core_ftredge/utils_gcp.py,sha256=8KgsOPkLe1-1i3M_UX5niKg_CjjiNoUhZXiWFIHJdmY,11286
-ipulse_shared_core_ftredge/utils_templates_and_schemas.py,sha256=
-ipulse_shared_core_ftredge/enums/__init__.py,sha256=
-ipulse_shared_core_ftredge/enums/enums_common_utils.py,sha256=
-ipulse_shared_core_ftredge/enums/enums_data_eng.py,sha256=
-ipulse_shared_core_ftredge/enums/enums_module_fincore.py,sha256=
+ipulse_shared_core_ftredge/utils_templates_and_schemas.py,sha256=AwGl9J-XQc_aO_VKWhR_TuA1ML8nWxHuzHtxBH8yfwE,7499
+ipulse_shared_core_ftredge/enums/__init__.py,sha256=reCHJE0j_QTGwag7uo3cQXMTRaGRR_YOwywweb5pFb8,839
+ipulse_shared_core_ftredge/enums/enums_common_utils.py,sha256=gYckmSAzhh5MA3CyV18Z9-YtHsFJlmEcKC-4nuqMXu4,5673
+ipulse_shared_core_ftredge/enums/enums_data_eng.py,sha256=7w3Jjmw84Wq22Bb5Qs09Z82Bdf-j8nhRiQJfw60_g80,1903
+ipulse_shared_core_ftredge/enums/enums_module_fincore.py,sha256=W1TkSLu3ryLf_aif2VcKsFznWz0igeMUR_buoGEG6w8,1406
 ipulse_shared_core_ftredge/enums/enums_modules.py,sha256=AyXUoNmR75DZLaEHi3snV6LngR25LeZRqzrLDaAupbY,1244
 ipulse_shared_core_ftredge/models/__init__.py,sha256=gE22Gzhil0RYQa7YLtdtT44_AsWqklcDfRtgLAQc1dI,200
 ipulse_shared_core_ftredge/models/audit_log_firestore.py,sha256=5AwO6NHuOncq65n400eqM8QPrS2EGGaP3Z_6l2rxdBE,261

@@ -18,8 +18,8 @@ ipulse_shared_core_ftredge/models/user_profile_update.py,sha256=oKK0XsQDKkgDvjFP
 ipulse_shared_core_ftredge/models/user_status.py,sha256=8TyRd8tBK9_xb0MPKbI5pn9-lX7ovKbeiuWYYPtIOiw,3202
 ipulse_shared_core_ftredge/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ipulse_shared_core_ftredge/tests/test.py,sha256=0lS8HP5Quo_BqNoscU40qOH9aJRaa1Pfam5VUBmdld8,682
-ipulse_shared_core_ftredge-2.
-ipulse_shared_core_ftredge-2.
-ipulse_shared_core_ftredge-2.
-ipulse_shared_core_ftredge-2.
-ipulse_shared_core_ftredge-2.
+ipulse_shared_core_ftredge-2.55.dist-info/LICENCE,sha256=YBtYAXNqCCOo9Mr2hfkbSPAM9CeAr2j1VZBSwQTrNwE,1060
+ipulse_shared_core_ftredge-2.55.dist-info/METADATA,sha256=nZuLld1WcyBi4KcRJGZr6dGRV62pYA1Nh5SkXMW1-0I,561
+ipulse_shared_core_ftredge-2.55.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
+ipulse_shared_core_ftredge-2.55.dist-info/top_level.txt,sha256=8sgYrptpexkA_6_HyGvho26cVFH9kmtGvaK8tHbsGHk,27
+ipulse_shared_core_ftredge-2.55.dist-info/RECORD,,
{ipulse_shared_core_ftredge-2.53.dist-info → ipulse_shared_core_ftredge-2.55.dist-info}/LICENCE
RENAMED
File without changes

{ipulse_shared_core_ftredge-2.53.dist-info → ipulse_shared_core_ftredge-2.55.dist-info}/top_level.txt
RENAMED
File without changes