ipulse-shared-core-ftredge 2.53-py3-none-any.whl → 2.54-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ipulse-shared-core-ftredge was flagged as possibly problematic by the registry diff tool.

@@ -5,9 +5,9 @@ from .utils_gcp import (setup_gcp_logger_and_error_report,
                         write_csv_to_gcs, write_json_to_gcs)
 from .utils_templates_and_schemas import (create_bigquery_schema_from_json,
                                           check_format_against_schema_template)
-from .utils_common import (SmartLog, Watcher)
+from .utils_common import (ContextLog, PipelineWatcher)

-from .enums import (WatcherCategory, LogLevel, Unit, Frequency,
+from .enums import (TargetLogs, LogLevel, Unit, Frequency,
                     Module, SubModule, BaseDataCategory,
                     FinCoreCategory, FincCoreSubCategory,
                     FinCoreRecordsCategory, ExchangeOrPublisher,
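For code that imports these names from the package top level, the 2.53 → 2.54 rename is mechanical; a minimal migration sketch based on the re-exports shown above:

```python
# ipulse-shared-core-ftredge 2.53 exposed the old names:
# from ipulse_shared_core_ftredge import SmartLog, Watcher, WatcherCategory

# ipulse-shared-core-ftredge 2.54 re-exports the renamed equivalents:
from ipulse_shared_core_ftredge import ContextLog, PipelineWatcher, TargetLogs
```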
@@ -4,7 +4,7 @@
 # pylint: disable=missing-class-docstring

 from .enums_common_utils import (LogLevel,
-                                 WatcherCategory,
+                                 TargetLogs,
                                  Unit,
                                  Frequency)

@@ -6,14 +6,14 @@
 from enum import Enum


-class WatcherCategory(Enum):
-    MIXED="watcher_mixed"
-    SUCCESSES = "watcher_successes"
-    NOTICES = "watcher_notices"
-    SUCCESSES_AND_NOTICES = "watcher_succs_n_notcs"
-    WARNINGS = "watcher_warnings"
-    WARNINGS_AND_ERRORS = "watcher_warns_n_errs"
-    ERRORS = "watcher_errors"
+class TargetLogs(Enum):
+    MIXED="mixed_logs"
+    SUCCESSES = "success_logs"
+    NOTICES = "notice_logs"
+    SUCCESSES_AND_NOTICES = "succs_n_notc_logs"
+    WARNINGS = "warning_logs"
+    WARNINGS_AND_ERRORS = "warn_n_err_logs"
+    ERRORS = "error_logs"


 class LogLevel(Enum):
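The member names of the former WatcherCategory are unchanged, but both the class name and the serialized string values change, which matters for anything that stored or matched the old values (the exported log file prefix defaults to this value later in the diff). A small illustration of the new values:

```python
from ipulse_shared_core_ftredge import TargetLogs

# Same member names, new string values (old values shown in comments):
assert TargetLogs.MIXED.value == "mixed_logs"                     # was "watcher_mixed"
assert TargetLogs.WARNINGS_AND_ERRORS.value == "warn_n_err_logs"  # was "watcher_warns_n_errs"
```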
@@ -98,17 +98,17 @@ class Unit(Enum):
     CURRENCY = "currency" # General currency, when specific currency is not needed

     # Stock Market and Investments
-    SHARES = "shars" # Number of shares
+    SHARES = "shares" # Number of shares
     PERCENT = "prcnt" # Percentage, used for rates and ratios
     BPS = "bps" # Basis points, often used for interest rates and financial ratios

     # Volume and Quantitative Measurements
-    VOLUME = "vol" # Trading volume in units
+    VOLUME = "volume" # Trading volume in units
     MILLIONS = "mills" # Millions, used for large quantities or sums
     BILLIONS = "bills" # Billions, used for very large quantities or sums

     # Commodity Specific Units
-    BARRELS = "barrls" # Barrels, specifically for oil and similar liquids
+    BARRELS = "barrels" # Barrels, specifically for oil and similar liquids
     TONNES = "tonnes" # Tonnes, for bulk materials like metals or grains
     TROY_OUNCES = "troy_oz" # Troy ounces, specifically for precious metals

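These Unit changes are value-level spelling fixes only; the member names stay the same, so attribute access is unaffected, but any persisted data or comparisons keyed on the old abbreviated strings will no longer match. For illustration:

```python
from ipulse_shared_core_ftredge import Unit

assert Unit.SHARES.value == "shares"    # was "shars" in 2.53
assert Unit.VOLUME.value == "volume"    # was "vol"
assert Unit.BARRELS.value == "barrels"  # was "barrls"
```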
@@ -4,11 +4,12 @@
 from enum import Enum

 class SourcingTriggerType(Enum):
-    HISTORIC_MANUAL = "historic_manual"
-    LIVE_SCHEDULED = "live_scheduled"
+    BULK_MANUAL = "bulk_manual"
+    BULK_SCHEDULED = "bulk_scheduled" # almost always historic bulk is manual
+    RECENT_SCHEDULED = "recent_scheduled"
+    RECENT_MANUAL = "recent_manual"
     ADHOC_MANUAL = "adhoc_manual"
     ADHOC_SCHEDULED = "adhoc_scheduled"
-    LIVE_MANUAL = "live_manual"

 class SourcingPipelineType(Enum):
     LOCAL_GET_API_TO_GCS = "local_get_api_to_gcs"
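SourcingTriggerType drops the HISTORIC_*/LIVE_* members in favour of BULK_*/RECENT_* ones. The mapping below is one plausible reading of the rename (not stated by the package), and the module path is assumed from the enums_data_eng.py entry in the wheel RECORD:

```python
# Module path assumed from the wheel RECORD; adjust if the enum lives elsewhere.
from ipulse_shared_core_ftredge.enums.enums_data_eng import SourcingTriggerType

# Likely migration of the removed members (interpretation, not documented):
#   HISTORIC_MANUAL -> BULK_MANUAL
#   LIVE_SCHEDULED  -> RECENT_SCHEDULED
#   LIVE_MANUAL     -> RECENT_MANUAL
# BULK_SCHEDULED is new; the ADHOC_* members are unchanged.
trigger = SourcingTriggerType.RECENT_SCHEDULED
print(trigger.value)  # "recent_scheduled"
```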
@@ -5,14 +5,14 @@ from enum import Enum


 class FinCoreCategory(Enum):
-    MARKET="market"
-    ECONOMY="economy"
-    POLITICS="poltcs"
-    CORPORATE="corp"
+    MARKET="market" # Market prices data
+    CORPORATE="corp" # Corporate data such as financial statements and earnings, similar to fundamental data
     FUNDAMENTAL="fundam"
-    SENTIMENT="sntmnt"
+    ECONOMY="economy"
     NEWS="news"
+    SENTIMENT="sntmnt"
     SOCIAL="social"
+    POLITICS="poltcs"
     OTHER="other"

 class FincCoreSubCategory(Enum):
@@ -32,16 +32,16 @@ class FincCoreSubCategory(Enum):

 class FinCoreRecordsCategory(Enum):
     PRICE="pric"
-    PRICE_SPOT= "pric.s"
-    PRICE_OHLCVA="pric.ohlcva"
-    PRICE_OHLCV="pric.ohlcv"
-    PRICE_OPEN="pric.o"
-    PRICE_HIGH="pric.h"
-    PRICE_LOW="pric.l"
-    PRICE_CLOSE="pric.c"
-    PRICE_VOLUME="pric.v"
-    PRICE_ADJC="pric.a"
-    FUNDAMENTAL="fundam" # treat this differently
+    SPOT= "spot"
+    OHLCVA="ohlcva"
+    OHLCV="ohlcv"
+    OPEN="open"
+    HIGH="high"
+    LOW="low"
+    CLOSE="close"
+    VOLUME="volume"
+    ADJC="adjc"
+    FUNDAMENTAL="fundam" # treat this differently
     EARNINGS="earnings"
     CASH_FLOW="cashflw"
     BALANCE_SHEET="blnce_sht"
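FinCoreRecordsCategory loses the PRICE_ prefix on the per-field members and replaces the dotted "pric.*" values with plain words, so both attribute access and stored values change:

```python
from ipulse_shared_core_ftredge import FinCoreRecordsCategory

assert FinCoreRecordsCategory.OHLCV.value == "ohlcv"  # was PRICE_OHLCV = "pric.ohlcv"
assert FinCoreRecordsCategory.CLOSE.value == "close"  # was PRICE_CLOSE = "pric.c"
# PRICE ("pric"), FUNDAMENTAL, EARNINGS, CASH_FLOW, BALANCE_SHEET, ... are unchanged.
```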
@@ -10,15 +10,15 @@ from datetime import datetime, timezone
 from contextlib import contextmanager
 from typing import List
 from google.cloud import logging as cloudlogging
-from ipulse_shared_core_ftredge.enums.enums_common_utils import WatcherCategory, LogLevel, LogStatus
+from ipulse_shared_core_ftredge.enums.enums_common_utils import TargetLogs, LogLevel, LogStatus
 from ipulse_shared_core_ftredge.utils_gcp import write_json_to_gcs


 # ["data_import","data_quality", "data_processing","data_general","data_persistance","metadata_quality", "metadata_processing", "metadata_persistance","metadata_general"]

-class SmartLog:
+class ContextLog:
     MAX_TRACEBACK_LINES = 14 # Define the maximum number of traceback lines to include
-    def __init__(self, level: LogLevel, start_context: str = None, collector_id: str = None,
+    def __init__(self, level: LogLevel, base_context: str = None, collector_id: str = None,
                  e: Exception = None, e_type: str = None, e_message: str = None, e_traceback: str = None,
                  subject: str = None, description: str = None, context: str = None,
                  log_status: LogStatus = LogStatus.OPEN):
@@ -32,7 +32,7 @@ class SmartLog:
         self.level = level
         self.subject = subject
         self.description = description
-        self._start_context = start_context
+        self._base_context = base_context
         self._context = context
         self.collector_id = collector_id
         self.exception_type = e_type
@@ -86,12 +86,12 @@ class SmartLog:
         return formatted_traceback

     @property
-    def start_context(self):
-        return self._start_context
+    def base_context(self):
+        return self._base_context

-    @start_context.setter
-    def start_context(self, value):
-        self._start_context = value
+    @base_context.setter
+    def base_context(self, value):
+        self._base_context = value

     @property
     def context(self):
@@ -103,7 +103,7 @@ class SmartLog:

     def to_dict(self):
         return {
-            "start_context": self.start_context,
+            "base_context": self.base_context,
             "context": self.context,
             "level_code": self.level.value,
             "level_name": self.level.name,
@@ -117,13 +117,13 @@ class SmartLog:
             "timestamp": self.timestamp
         }

-class Watcher:
+class PipelineWatcher:
     ERROR_START_CODE = LogLevel.ERROR.value
     WARNING_START_CODE = LogLevel.WARNING.value
     NOTICE_START_CODE = LogLevel.NOTICE.value
     SUCCESS_START_CODE = LogLevel.SUCCESS.value

-    def __init__(self, start_context: str, category: WatcherCategory = WatcherCategory.MIXED, logger_name=None):
+    def __init__(self, base_context: str, target_logs: TargetLogs = TargetLogs.MIXED, logger_name=None):
         self._id = str(uuid.uuid4())
         self._logs = []
         self._early_stop = False
@@ -132,9 +132,9 @@ class Watcher:
         self._notices_count = 0
         self._successes_count = 0
         self._level_counts = {level.name: 0 for level in LogLevel}
-        self._start_context = start_context
+        self._base_context = base_context
         self._context_stack = []
-        self._category = category.value
+        self._target_logs = target_logs.value
         self._logger = self._initialize_logger(logger_name)

     def _initialize_logger(self, logger_name):
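The constructor rename mirrors the ContextLog changes: start_context becomes base_context and the category keyword becomes target_logs, now typed as TargetLogs. A hedged construction sketch (pipeline names are illustrative; leaving logger_name unset appears to skip the Cloud Logging setup, as in the previous version):

```python
from ipulse_shared_core_ftredge import PipelineWatcher, TargetLogs

watcher = PipelineWatcher(base_context="eod_prices_pipeline",
                          target_logs=TargetLogs.WARNINGS_AND_ERRORS)
# With logger_name=None no google-cloud-logging client is created, so the
# watcher only accumulates logs and counters in memory.
```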
@@ -163,8 +163,8 @@ class Watcher:
         return " >> ".join(self._context_stack)

     @property
-    def start_context(self):
-        return self._start_context
+    def base_context(self):
+        return self._base_context

     @property
     def id(self):
@@ -179,7 +179,7 @@ class Watcher:
         if create_error_log:
             if pop_context:
                 self.pop_context()
-            self.add_log(SmartLog(level=LogLevel.ERROR,
+            self.add_log(ContextLog(level=LogLevel.ERROR,
                                   subject="EARLY_STOP",
                                   description=f"Total MAX_ERRORS_TOLERANCE of {max_errors_tolerance} has been reached."))

@@ -189,11 +189,11 @@ class Watcher:
     def get_early_stop(self):
         return self._early_stop

-    def add_log(self, log: SmartLog):
-        if (self._category == WatcherCategory.SUCCESSES and log.level >=self.NOTICE_START_CODE) or \
-           (self._category == WatcherCategory.WARNINGS_AND_ERRORS and log.level.value < self.WARNING_START_CODE):
-            raise ValueError(f"Invalid log level {log.level.name} for category {self._category}")
-        log.start_context = self.start_context
+    def add_log(self, log: ContextLog):
+        if (self._target_logs == TargetLogs.SUCCESSES and log.level >=self.NOTICE_START_CODE) or \
+           (self._target_logs == TargetLogs.WARNINGS_AND_ERRORS and log.level.value < self.WARNING_START_CODE):
+            raise ValueError(f"Invalid log level {log.level.name} for Pipeline Watcher target logs setup: {self._target_logs}")
+        log.base_context = self.base_context
         log.context = self.current_context
         log.collector_id = self.id
         log_dict = log.to_dict()
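add_log now accepts ContextLog entries and validates them against the watcher's target_logs setting before stamping base_context, the current context stack and the watcher id onto each entry. A usage sketch with illustrative subjects, relying on the counting helpers that appear unchanged further down the diff:

```python
from ipulse_shared_core_ftredge import PipelineWatcher, ContextLog, LogLevel

watcher = PipelineWatcher(base_context="eod_prices_pipeline")  # TargetLogs.MIXED by default

watcher.add_log(ContextLog(level=LogLevel.WARNING,
                           subject="AAPL",
                           description="Adjusted close missing; kept raw close."))

print(watcher.count_warnings())   # expected: 1
print(watcher.contains_errors())  # expected: False
```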
@@ -201,7 +201,7 @@ class Watcher:
         self._update_counts(log_dict)

         if self._logger:
-            # We specifically want to avoid having an ERROR log level for this structured Watcher reporting, to ensure Errors are alerting on Critical Application Services.
+            # We specifically want to avoid having an ERROR log level for this structured Pipeline Watcher reporting, to ensure Errors are alerting on Critical Application Services.
             # A single ERROR log level can be used for the entire pipeline, which shall be used at the end of the pipeline
             if log.level.value >= self.WARNING_START_CODE:
                 self._logger.log_struct(log_dict, severity="WARNING")
@@ -210,7 +210,7 @@ class Watcher:
             else:
                 self._logger.log_struct(log_dict, severity="INFO")

-    def add_logs(self, logs: List[SmartLog]):
+    def add_logs(self, logs: List[ContextLog]):
         for log in logs:
             self.add_log(log)

@@ -236,7 +236,7 @@ class Watcher:
             log for log in self._logs
             if context_substring in log["context"]
         ]
-
+
     def contains_errors(self):
         return self._errors_count > 0

@@ -248,7 +248,7 @@ class Watcher:

     def count_warnings_and_errors(self):
         return self._warnings_count + self._errors_count
-
+
     def count_warnings(self):
         return self._warnings_count

@@ -324,7 +324,7 @@ class Watcher:
                 logger.error(message, exc_info=exc_info)

         if not file_prefix:
-            file_prefix = self._category
+            file_prefix = self._target_logs
         if not file_name:
             timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
             if top_level_context:
@@ -393,265 +393,4 @@ class Watcher:
             self._notices_count += 1
         elif self.SUCCESS_START_CODE <= level_code < self.NOTICE_START_CODE:
             self._successes_count += 1
-        self._level_counts[level_name] += 1
-
-
-# class Watcher:
-#     ERROR_START_CODE = WatcherLogLevel.ERROR.value
-#     WARNING_START_CODE = WatcherLogLevel.WARNING.value
-#     NOTICE_START_CODE = WatcherLogLevel.NOTICE.value
-#     SUCCESS_START_CODE = WatcherLogLevel.SUCCESS.value
-
-#     def __init__(self, start_context: str, category: WatcherCategory = WatcherCategory.MIXED, logger_name=None):
-#         self._id = str(uuid.uuid4())
-#         self._logs = []
-#         self._early_stop = False
-#         self._errors_count = 0
-#         self._warnings_count = 0
-#         self._successes_count = 0
-#         self._level_counts = {level.name: 0 for level in WatcherLogLevel}
-#         self._start_context = start_context
-#         self._context_stack = []
-#         self._category = category.value
-#         self._logger = self._initialize_logger(logger_name)
-
-#     def _initialize_logger(self, logger_name):
-#         if logger_name:
-#             logging_client = cloudlogging.Client()
-#             return logging_client.logger(logger_name)
-#         return None
-
-
-#     @contextmanager
-#     def context(self, context):
-#         self.push_context(context)
-#         try:
-#             yield
-#         finally:
-#             self.pop_context()
-
-#     def push_context(self, context):
-#         self._context_stack.append(context)
-
-#     def pop_context(self):
-#         if self._context_stack:
-#             self._context_stack.pop()
-
-#     @property
-#     def current_context(self):
-#         return " >> ".join(self._context_stack)
-
-#     @property
-#     def start_context(self):
-#         return self._start_context
-
-#     @property
-#     def id(self):
-#         return self._id
-
-#     @property
-#     def early_stop(self):
-#         return self._early_stop
-
-#     def set_early_stop(self, max_errors_tolerance:int, create_error_notice=True,pop_context=False):
-#         self.early_stop = True
-#         if create_error_notice:
-#             if pop_context:
-#                 self.pop_context()
-#             self.add_notice(WatcherLog(level=WatcherLogLevel.ERROR,
-#                                        subject="EARLY_STOP",
-#                                        description=f"Total MAX_ERRORS_TOLERANCE of {max_errors_tolerance} has been reached."))
-
-#     def reset_early_stop(self):
-#         self._early_stop = False
-
-#     def get_early_stop(self):
-#         return self._early_stop
-
-#     def add_notice(self, notice: WatcherLog):
-#         if (self._category == WatcherCategory.SUCCESSES.value and notice.level != WatcherLogLevel.SUCCESS) or \
-#            (self._category == WatcherCategory.WARNINGS_AND_ERRORS.value and notice.level.value < self.WARNING_START_CODE):
-#             raise ValueError(f"Invalid notice level {notice.level.name} for category {self._category}")
-#         notice.start_context = self.start_context
-#         notice.context = self.current_context
-#         notice.watcher_id = self.id
-#         notice_dict = notice.to_dict()
-#         self._logs.append(notice_dict)
-#         self._update_counts(notice_dict)
-
-#         if self._logger:
-#             if notice.level.value >= self.WARNING_START_CODE:
-#                 self._logger.log_struct(notice_dict, severity="WARNING")
-#             else:
-#                 self._logger.log_struct(notice_dict, severity="INFO")
-
-#     def add_notices(self, notices: List[WatcherLog]):
-#         for notice in notices:
-#             self.add_notice(notice)
-
-#     def clear_notices_and_counts(self):
-#         self._logs = []
-#         self._errors_count = 0
-#         self._warnings_count = 0
-#         self._successes_count = 0
-#         self._level_counts = {level.name: 0 for level in WatcherLogLevel}
-
-#     def clear_notices(self):
-#         self._logs = []
-
-#     def get_all_notices(self):
-#         return self._logs
-
-#     def get_notices_for_level(self, level: WatcherLogLevel):
-#         return [notice for notice in self._logs if notice["level_code"] == level.value]
-
-#     def get_notices_by_str_in_context(self, context_substring: str):
-#         return [
-#             notice for notice in self._logs
-#             if context_substring in notice["context"]
-#         ]
-
-#     def contains_errors(self):
-#         return self._errors_count > 0
-
-#     def count_errors(self):
-#         return self._errors_count
-
-#     def contains_warnings_or_errors(self):
-#         return self._warnings_count > 0 or self._errors_count > 0
-
-#     def count_warnings_and_errors(self):
-#         return self._warnings_count + self._errors_count
-
-#     def count_warnings(self):
-#         return self._warnings_count
-
-#     def count_successes(self):
-#         return self._successes_count
-
-#     def count_all_notices(self):
-#         return len(self._logs)
-
-#     def count_notices_by_level(self, level: WatcherLogLevel):
-#         return self._level_counts.get(level.name, 0)
-
-#     def _count_notices(self, context_substring: str, exact_match=False, level_code_min=None, level_code_max=None):
-#         return sum(
-#             1 for notice in self._logs
-#             if (notice["context"] == context_substring if exact_match else context_substring in notice["context"]) and
-#             (level_code_min is None or notice["level_code"] >= level_code_min) and
-#             (level_code_max is None or notice["level_code"] <= level_code_max)
-#         )
-
-#     def count_notices_for_current_context(self):
-#         return self._count_notices(self.current_context, exact_match=True)
-
-#     def count_notices_for_current_and_nested_contexts(self):
-#         return self._count_notices(self.current_context)
-
-#     def count_notices_by_level_for_current_context(self, level: WatcherLogLevel):
-#         return self._count_notices(self.current_context, exact_match=True, level_code_min=level.value, level_code_max=level.value)
-
-#     def count_notices_by_level_for_current_and_nested_contexts(self, level: WatcherLogLevel):
-#         return self._count_notices(self.current_context, level_code_min=level.value, level_code_max=level.value)
-
-#     def count_errors_for_current_context(self):
-#         return self._count_notices(self.current_context, exact_match=True, level_code_min=self.ERROR_START_CODE)
-
-#     def count_errors_for_current_and_nested_contexts(self):
-#         return self._count_notices(self.current_context, level_code_min=self.ERROR_START_CODE)
-
-#     def count_warnings_and_errors_for_current_context(self):
-#         return self._count_notices(self.current_context, exact_match=True, level_code_min=self.WARNING_START_CODE)
-
-#     def count_warnings_and_errors_for_current_and_nested_contexts(self):
-#         return self._count_notices(self.current_context, level_code_min=self.WARNING_START_CODE)
-
-#     def count_warnings_for_current_context(self):
-#         return self._count_notices(self.current_context, exact_match=True, level_code_min=self.WARNING_START_CODE, level_code_max=self.ERROR_START_CODE - 1)
-
-#     def count_warnings_for_current_and_nested_contexts(self):
-#         return self._count_notices(self.current_context, level_code_min=self.WARNING_START_CODE, level_code_max=self.ERROR_START_CODE - 1)
-
-#     def count_successes_for_current_context(self):
-#         return self._count_notices(self.current_context, exact_match=True, level_code_min=self.SUCCESS_START_CODE, level_code_max=self.SUCCESS_START_CODE)
-
-#     def count_successes_for_current_and_nested_contexts(self):
-#         return self._count_notices(self.current_context, level_code_min=self.SUCCESS_START_CODE, level_code_max=self.SUCCESS_START_CODE)
-
-#     def export_notices_to_gcs_file(self, bucket_name, storage_client, file_prefix=None, file_name=None, top_level_context=None, save_locally=False, local_path=None, logger=None, max_retries=2):
-#         def log_message(message):
-#             if logger:
-#                 logger.info(message)
-
-#         def log_error(message, exc_info=False):
-#             if logger:
-#                 logger.error(message, exc_info=exc_info)
-
-#         if not file_prefix:
-#             file_prefix = self._category
-#         if not file_name:
-#             timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
-#             if top_level_context:
-#                 file_name = f"{file_prefix}_{timestamp}_{top_level_context}_len{len(self._logs)}.json"
-#             else:
-#                 file_name = f"{file_prefix}_{timestamp}_len{len(self._logs)}.json"
-
-#         result=None
-#         try:
-#             result= write_json_to_gcs(
-#                 bucket_name=bucket_name,
-#                 storage_client=storage_client,
-#                 data=self._logs,
-#                 file_name=file_name,
-#                 save_locally=save_locally,
-#                 local_path=local_path,
-#                 logger=logger,
-#                 max_retries=max_retries,
-#                 overwrite_gcs=False
-#             )
-#             log_message(f"{file_prefix} successfully saved (ovewritten={result.get("gcs_file_overwritten")}) to GCS at {result.get("gcs_path")} and locally at {result.get("local_path")}.")
-#         except Exception as e:
-#             log_error(f"Failed at export_notices_to_gcs_file for {file_prefix} for file {file_name} to bucket {bucket_name}: {type(e).__name__} - {str(e)}")
-
-#         return result
-
-#     def import_notices_from_json(self, json_or_file, logger=None):
-#         def log_message(message):
-#             if logger:
-#                 logger.info(message)
-
-#         def log_warning(message, exc_info=False):
-#             if logger:
-#                 logger.warning(message, exc_info=exc_info)
-
-#         try:
-#             if isinstance(json_or_file, str): # Load from string
-#                 imported_notices = json.loads(json_or_file)
-#             elif hasattr(json_or_file, 'read'): # Load from file-like object
-#                 imported_notices = json.load(json_or_file)
-#             self.add_notices(imported_notices)
-#             log_message("Successfully imported notices from json.")
-#         except Exception as e:
-#             log_warning(f"Failed to import notices from json: {type(e).__name__} - {str(e)}", exc_info=True)
-
-#     def _update_counts(self, notice, remove=False):
-#         level_code = notice["level_code"]
-#         level_name = notice["level_name"]
-
-#         if remove:
-#             if level_code >= self.ERROR_START_CODE:
-#                 self._errors_count -= 1
-#             elif level_code >= self.WARNING_START_CODE:
-#                 self._warnings_count -= 1
-#             elif level_code >= self.SUCCESS_START_CODE:
-#                 self._successes_count -= 1
-#             self._level_counts[level_name] -= 1
-#         else:
-#             if level_code >= self.ERROR_START_CODE:
-#                 self._errors_count += 1
-#             elif level_code >= self.WARNING_START_CODE:
-#                 self._warnings_count += 1
-#             elif level_code == self.SUCCESS_START_CODE:
-#                 self._successes_count += 1
-#             self._level_counts[level_name] += 1
+        self._level_counts[level_name] += 1
@@ -6,7 +6,7 @@
 import datetime
 from google.cloud import bigquery
 from ipulse_shared_core_ftredge.enums.enums_common_utils import LogLevel
-from ipulse_shared_core_ftredge.utils_common import SmartLog
+from ipulse_shared_core_ftredge.utils_common import ContextLog


 def create_bigquery_schema_from_json(json_schema):
@@ -59,13 +59,13 @@ def check_format_against_schema_template(data_to_check, schema, dt_ts_to_str=Tru
                 checked_data[field_name] = value

             elif mode == "REQUIRED":
-                warning = SmartLog(level=LogLevel.WARNING,
+                warning = ContextLog(level=LogLevel.WARNING,
                                    subject=field_name,
                                    description=f"Required field '{field_name}' is missing in the updates.")
                 warnings_or_error.append(warning)

    except Exception as e:
-        error_log = SmartLog(level=LogLevel.ERROR_EXCEPTION_REDO,
+        error_log = ContextLog(level=LogLevel.ERROR_EXCEPTION_REDO,
                              subject=data_to_check,
                              description=f"An error occurred during update check: {str(e)}")
        warnings_or_error.append(error_log)
@@ -85,11 +85,11 @@ def handle_date_fields(field_name, value, dt_ts_to_str):
                 return value, None
             return parsed_date, None
         except ValueError:
-            return None, SmartLog(level=LogLevel.WARNING_FIX_REQUIRED,
+            return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
                                   subject=field_name,
                                   description=f"Expected a DATE in YYYY-MM-DD format but got {value}.")
     else:
-        return None, SmartLog(level=LogLevel.WARNING_FIX_REQUIRED,
+        return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
                               subject=field_name,
                               description= f"Expected a DATE or YYYY-MM-DD str format but got {value} of type {type(value).__name__}.")

@@ -107,11 +107,11 @@ def handle_timestamp_fields(field_name, value, dt_ts_to_str):
                 return value, None
             return parsed_datetime, None
         except ValueError:
-            return None, SmartLog(level=LogLevel.WARNING_FIX_REQUIRED,
+            return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
                                   subject=field_name,
                                   description= f"Expected ISO format TIMESTAMP but got {value}.")
     else:
-        return None, SmartLog(level=LogLevel.WARNING_FIX_REQUIRED,
+        return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
                               subject=field_name,
                               description= f"Expected ISO format TIMESTAMP but got {value} of type {type(value).__name__}.")

@@ -119,7 +119,7 @@ def handle_timestamp_fields(field_name, value, dt_ts_to_str):
 def check_and_truncate_length(field_name, value, max_length):
     """Checks and truncates the length of string fields if they exceed the max length."""
     if isinstance(value, str) and len(value) > max_length:
-        return value[:max_length], SmartLog(level=LogLevel.WARNING_FIX_RECOMMENDED,
+        return value[:max_length], ContextLog(level=LogLevel.WARNING_FIX_RECOMMENDED,
                                             subject= field_name,
                                             description= f"Field exceeds max length: {len(value)}/{max_length}. Truncating.")

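check_and_truncate_length keeps its behaviour and simply returns the renamed log type for the truncation warning. A small sketch (module path inferred from the surrounding hunks of utils_templates_and_schemas.py):

```python
from ipulse_shared_core_ftredge.utils_templates_and_schemas import check_and_truncate_length

value, issue = check_and_truncate_length("description", "x" * 600, 500)
# value is the truncated 500-character string; issue is now a ContextLog
# (previously a SmartLog) at level WARNING_FIX_RECOMMENDED.
```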
@@ -129,7 +129,7 @@ def check_and_truncate_length(field_name, value, max_length):

 def handle_type_conversion(field_type, field_name, value):
     if field_type == "STRING" and not isinstance(value, str):
-        return str(value), SmartLog(level=LogLevel.WARNING_REVIEW_RECOMMENDED,
+        return str(value), ContextLog(level=LogLevel.WARNING_REVIEW_RECOMMENDED,
                                     subject=field_name,
                                     description= f"Expected STRING but got {value} of type {type(value).__name__}.")

@@ -137,18 +137,18 @@ def handle_type_conversion(field_type, field_name, value):
         try:
             return int(value), None
         except ValueError:
-            return None, SmartLog(level=LogLevel.WARNING_FIX_REQUIRED,
+            return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
                                   subject= field_name,
                                   description=f"Expected INTEGER, but got {value} of type {type(value).__name__}.")
     if field_type == "FLOAT64" and not isinstance(value, float):
         try:
             return float(value), None
         except ValueError:
-            return None, SmartLog(level=LogLevel.WARNING_FIX_REQUIRED,
+            return None, ContextLog(level=LogLevel.WARNING_FIX_REQUIRED,
                                   subject=field_name,
                                   description=f"Expected FLOAT, but got {value} of type {type(value).__name__}.")
     if field_type == "BOOL" and not isinstance(value, bool):
-        return bool(value), SmartLog(level=LogLevel.WARNING_REVIEW_RECOMMENDED,
+        return bool(value), ContextLog(level=LogLevel.WARNING_REVIEW_RECOMMENDED,
                                      subject=field_name,
                                      description=f"Expected BOOL, but got {value}. Converting as {bool(value)}.")

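handle_type_conversion likewise swaps SmartLog for ContextLog in its (converted_value, issue) return tuple; conversion failures are still reported rather than raised. A sketch exercising the FLOAT64 branch shown above (module path inferred from the surrounding hunks):

```python
from ipulse_shared_core_ftredge.utils_templates_and_schemas import handle_type_conversion

value, issue = handle_type_conversion("FLOAT64", "close_price", "189.98")
# value == 189.98, issue is None

value, issue = handle_type_conversion("FLOAT64", "close_price", "n/a")
# value is None, issue is a ContextLog at level WARNING_FIX_REQUIRED
```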
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ipulse_shared_core_ftredge
-Version: 2.53
+Version: 2.54
 Summary: Shared Core models and Logger util for the Pulse platform project. Using AI for financial advisory and investment management.
 Home-page: https://github.com/TheFutureEdge/ipulse_shared_core
 Author: Russlan Ramdowar
@@ -1,11 +1,11 @@
-ipulse_shared_core_ftredge/__init__.py,sha256=CcHx8XkC7YJ5pOxsOpZJrTuxweN1ya1WlQJZjOTwrBY,868
-ipulse_shared_core_ftredge/utils_common.py,sha256=GEo4Xilh9quDdUh_ppOVO6G7ustHWkSaxuILKC_FLNo,27406
+ipulse_shared_core_ftredge/__init__.py,sha256=BBOoNtI-PdKen1nCcmfBEXJ40X0Jf6b8Yeo6ytJT5rQ,873
+ipulse_shared_core_ftredge/utils_common.py,sha256=r4jYBsu6TnAINTewk72CZebWH_P_oVf5LlkeDy9-ndU,16482
 ipulse_shared_core_ftredge/utils_gcp.py,sha256=8KgsOPkLe1-1i3M_UX5niKg_CjjiNoUhZXiWFIHJdmY,11286
-ipulse_shared_core_ftredge/utils_templates_and_schemas.py,sha256=CHrFbhRVrXlqDzGdPe9nujn5uFQtIN2xW7RBTiHYFBc,7475
-ipulse_shared_core_ftredge/enums/__init__.py,sha256=PT8Ig7hcx_hhVlsfun24H0pFjbdfQb201ZtJplQ9uAE,844
-ipulse_shared_core_ftredge/enums/enums_common_utils.py,sha256=CB0IMW5aer-n50G3AM6Fz-NrN85mJkvZhSrnuUb7EMs,5702
-ipulse_shared_core_ftredge/enums/enums_data_eng.py,sha256=2i6Qo6Yi_j_O9xxnOD6QA-r0Cv7mWAUaKUx907XMRio,1825
-ipulse_shared_core_ftredge/enums/enums_module_fincore.py,sha256=MuqQg249clrWUOBb1S-iPsoOldN2_F3ohRQizbjhwG0,1374
+ipulse_shared_core_ftredge/utils_templates_and_schemas.py,sha256=AwGl9J-XQc_aO_VKWhR_TuA1ML8nWxHuzHtxBH8yfwE,7499
+ipulse_shared_core_ftredge/enums/__init__.py,sha256=reCHJE0j_QTGwag7uo3cQXMTRaGRR_YOwywweb5pFb8,839
+ipulse_shared_core_ftredge/enums/enums_common_utils.py,sha256=gYckmSAzhh5MA3CyV18Z9-YtHsFJlmEcKC-4nuqMXu4,5673
+ipulse_shared_core_ftredge/enums/enums_data_eng.py,sha256=7w3Jjmw84Wq22Bb5Qs09Z82Bdf-j8nhRiQJfw60_g80,1903
+ipulse_shared_core_ftredge/enums/enums_module_fincore.py,sha256=W1TkSLu3ryLf_aif2VcKsFznWz0igeMUR_buoGEG6w8,1406
 ipulse_shared_core_ftredge/enums/enums_modules.py,sha256=AyXUoNmR75DZLaEHi3snV6LngR25LeZRqzrLDaAupbY,1244
 ipulse_shared_core_ftredge/models/__init__.py,sha256=gE22Gzhil0RYQa7YLtdtT44_AsWqklcDfRtgLAQc1dI,200
 ipulse_shared_core_ftredge/models/audit_log_firestore.py,sha256=5AwO6NHuOncq65n400eqM8QPrS2EGGaP3Z_6l2rxdBE,261
@@ -18,8 +18,8 @@ ipulse_shared_core_ftredge/models/user_profile_update.py,sha256=oKK0XsQDKkgDvjFP
 ipulse_shared_core_ftredge/models/user_status.py,sha256=8TyRd8tBK9_xb0MPKbI5pn9-lX7ovKbeiuWYYPtIOiw,3202
 ipulse_shared_core_ftredge/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ipulse_shared_core_ftredge/tests/test.py,sha256=0lS8HP5Quo_BqNoscU40qOH9aJRaa1Pfam5VUBmdld8,682
-ipulse_shared_core_ftredge-2.53.dist-info/LICENCE,sha256=YBtYAXNqCCOo9Mr2hfkbSPAM9CeAr2j1VZBSwQTrNwE,1060
-ipulse_shared_core_ftredge-2.53.dist-info/METADATA,sha256=9JixJKcqPsiCzRQR6ZpOiKwDIOcjEHFY6OG-VILJ_zg,561
-ipulse_shared_core_ftredge-2.53.dist-info/WHEEL,sha256=rWxmBtp7hEUqVLOnTaDOPpR-cZpCDkzhhcBce-Zyd5k,91
-ipulse_shared_core_ftredge-2.53.dist-info/top_level.txt,sha256=8sgYrptpexkA_6_HyGvho26cVFH9kmtGvaK8tHbsGHk,27
-ipulse_shared_core_ftredge-2.53.dist-info/RECORD,,
+ipulse_shared_core_ftredge-2.54.dist-info/LICENCE,sha256=YBtYAXNqCCOo9Mr2hfkbSPAM9CeAr2j1VZBSwQTrNwE,1060
+ipulse_shared_core_ftredge-2.54.dist-info/METADATA,sha256=EB0Hd9nuPAFaIClcQR5RIGECC1L9G2ZpVXsZ7gYNFB4,561
+ipulse_shared_core_ftredge-2.54.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
+ipulse_shared_core_ftredge-2.54.dist-info/top_level.txt,sha256=8sgYrptpexkA_6_HyGvho26cVFH9kmtGvaK8tHbsGHk,27
+ipulse_shared_core_ftredge-2.54.dist-info/RECORD,,
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (71.0.4)
+Generator: setuptools (71.1.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
