ipulse-shared-core-ftredge 2.6-py3-none-any.whl → 2.7.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ipulse-shared-core-ftredge might be problematic.

Files changed (36)
  1. ipulse_shared_core_ftredge/__init__.py +22 -4
  2. ipulse_shared_core_ftredge/enums/__init__.py +37 -0
  3. ipulse_shared_core_ftredge/enums/enums_common_utils.py +107 -0
  4. ipulse_shared_core_ftredge/enums/enums_data_eng.py +313 -0
  5. ipulse_shared_core_ftredge/enums/enums_logging.py +108 -0
  6. ipulse_shared_core_ftredge/enums/enums_module_fincore.py +72 -0
  7. ipulse_shared_core_ftredge/enums/enums_modules.py +31 -0
  8. ipulse_shared_core_ftredge/enums/enums_solution_providers.py +24 -0
  9. ipulse_shared_core_ftredge/{models → enums}/pulse_enums.py +10 -46
  10. ipulse_shared_core_ftredge/models/__init__.py +0 -1
  11. ipulse_shared_core_ftredge/models/organisation.py +61 -55
  12. ipulse_shared_core_ftredge/models/resource_catalog_item.py +97 -171
  13. ipulse_shared_core_ftredge/models/user_profile.py +10 -9
  14. ipulse_shared_core_ftredge/models/user_profile_update.py +32 -14
  15. ipulse_shared_core_ftredge/models/user_status.py +21 -11
  16. ipulse_shared_core_ftredge/utils/__init__.py +23 -0
  17. ipulse_shared_core_ftredge/utils/logs/__init__.py +2 -0
  18. ipulse_shared_core_ftredge/{models → utils/logs}/audit_log_firestore.py +1 -1
  19. ipulse_shared_core_ftredge/utils/logs/context_log.py +210 -0
  20. ipulse_shared_core_ftredge/utils/logs/get_logger.py +103 -0
  21. ipulse_shared_core_ftredge/utils/utils_cloud.py +53 -0
  22. ipulse_shared_core_ftredge/utils/utils_cloud_gcp.py +442 -0
  23. ipulse_shared_core_ftredge/utils/utils_cloud_gcp_with_collectors.py +166 -0
  24. ipulse_shared_core_ftredge/utils/utils_cloud_with_collectors.py +27 -0
  25. ipulse_shared_core_ftredge/utils/utils_collector_pipelinemon.py +356 -0
  26. ipulse_shared_core_ftredge/utils/utils_common.py +180 -0
  27. ipulse_shared_core_ftredge/utils/utils_templates_and_schemas.py +151 -0
  28. ipulse_shared_core_ftredge-2.7.1.dist-info/METADATA +14 -0
  29. ipulse_shared_core_ftredge-2.7.1.dist-info/RECORD +33 -0
  30. {ipulse_shared_core_ftredge-2.6.dist-info → ipulse_shared_core_ftredge-2.7.1.dist-info}/WHEEL +1 -1
  31. ipulse_shared_core_ftredge/tests/__init__.py +0 -0
  32. ipulse_shared_core_ftredge/tests/test.py +0 -17
  33. ipulse_shared_core_ftredge-2.6.dist-info/METADATA +0 -11
  34. ipulse_shared_core_ftredge-2.6.dist-info/RECORD +0 -17
  35. {ipulse_shared_core_ftredge-2.6.dist-info → ipulse_shared_core_ftredge-2.7.1.dist-info}/LICENCE +0 -0
  36. {ipulse_shared_core_ftredge-2.6.dist-info → ipulse_shared_core_ftredge-2.7.1.dist-info}/top_level.txt +0 -0
ipulse_shared_core_ftredge/models/user_profile_update.py
@@ -1,18 +1,36 @@
-
  from typing import Optional, Set
- from pydantic import BaseModel
- from datetime import datetime , date
+ from pydantic import BaseModel, Field, EmailStr
+ from datetime import date, datetime
+
+
+ CLASS_ORIGIN_AUTHOR="Russlan Ramdowar;russlan@ftredge.com"
+ CLASS_ORGIN_DATE=datetime(2024, 3, 15, 20, 15)
+
+ CLASS_VERSION = 2.01
+ CLASS_REVISION_AUTHOR="Russlan Ramdowar;russlan@ftredge.com"
+ CLASS_REVISION_DATE=datetime(2024, 3, 15, 20, 15)
+ LAST_MODIFICATION="Created , with all fields Optional"
 
  class UserProfileUpdate(BaseModel):
-     email: Optional[str] = None
-     username: Optional[str] = None
-     aliases: Optional[Set[str]] = None
-     first_name: Optional[str] = None
-     last_name: Optional[str] = None
-     mobile: Optional[str] = None
-     dob: Optional[date] = None
-     updt_date: Optional[datetime] = None
-     updt_by_user: Optional[str] = None
+     schema_version: Optional[float] = Field(None, description="Version of this Class == version of DB Schema")
+     email: Optional[EmailStr] = Field(None, description="Propagated from Firebase Auth")
+     organizations_uids: Optional[Set[str]] = Field(None, description="Depends on Subscription Plan, Regularly Updated")
+     creat_date: Optional[datetime] = Field(None, description="Creation date")
+     creat_by_user: Optional[str] = Field(None, description="Created by user")
+     updt_date: Optional[datetime] = Field(None, description="Update date")
+     updt_by_user: Optional[str] = Field(None, description="Updated by user")
+     aliases: Optional[Set[str]] = Field(None, description="User aliases")
+     provider_id: Optional[str] = Field(None, description="Provider ID")
+
+     username: Optional[str] = Field(None, description="Username")
+     dob: Optional[date] = Field(None, description="Date of Birth")
+     first_name: Optional[str] = Field(None, description="First Name")
+     last_name: Optional[str] = Field(None, description="Last Name")
+     mobile: Optional[str] = Field(None, description="Mobile Number")
 
-     def model_dump(self, **kwargs):
-         return super().model_dump(exclude_none=True, **kwargs)
+     # def model_dump(self, **kwargs):
+     #     return super().model_dump(exclude_none=True, **kwargs)
+
+
+
+
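Note that the model_dump override forcing exclude_none=True is commented out in 2.7.1, so callers building partial updates now have to pass it themselves. A minimal usage sketch of the reworked model (pydantic v2 assumed; field values are illustrative):

    from ipulse_shared_core_ftredge.models.user_profile_update import UserProfileUpdate

    # Only the fields being changed are set; unset fields stay None.
    update = UserProfileUpdate(email="jane@example.com", username="jane")

    # exclude_none is no longer applied automatically by an override, so pass it explicitly.
    payload = update.model_dump(exclude_none=True)
    # -> {'email': 'jane@example.com', 'username': 'jane'}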
ipulse_shared_core_ftredge/models/user_status.py
@@ -1,36 +1,46 @@
  from pydantic import BaseModel, Field
+
  from datetime import datetime
  from dateutil.relativedelta import relativedelta
- from typing import Set, Optional
- # import uuid
- from . import pulse_enums as enums
+ from typing import Set, Optional, Dict, List
+
+
 
 
  CLASS_ORIGIN_AUTHOR="Russlan Ramdowar;russlan@ftredge.com"
  CLASS_ORGIN_DATE=datetime(2024, 2, 12, 20, 5)
 
- CLASS_VERSION = 2.1
+ SCHEMA_VERSION = 2.3
  CLASS_REVISION_AUTHOR="Russlan Ramdowar;russlan@ftredge.com"
  CLASS_REVISION_DATE=datetime(2024, 2, 13, 20, 15)
+ LAST_MODIFICATION="Changed default IAM_GROUPS"
 
  DOMAIN="user"
  OBJ_REF = "usrsttus"
+
+ DEFAULT_IAM_GROUPS={"pulseroot":["full_open_read"]}
+ DEFAULT_SUBSCRIPTION_PLAN="subscription_free"
+ DEFAULT_SUBSCRIPTION_STATUS="active"
+ DEFAULT_SUBSCRIPTION_INSIGHT_CREDITS=10
+ DEFAULT_EXTRA_INSIGHT_CREDITS=0
+
+ ############################################ !!!!! ALWAYS UPDATE SCHEMA VERSION , IF SCHEMA IS BEING MODIFIED !!! ############################################
  class UserStatus(BaseModel):
-     schema_version: float = Field(default=CLASS_VERSION, description="Version of this Class == version of DB Schema") #User can Read only
+     schema_version: float = Field(default=SCHEMA_VERSION, description="Version of this Class == version of DB Schema") #User can Read only
      # uid: str = Field(frozen=True, description="Generated by Firebase Auth") #User can Read only
      # puid:str = Field(default_factory=lambda: f"{DOMAIN}{OBJ_REF}{datetime.utcnow().strftime('%Y%m%d%H%M')}{uuid.uuid4().hex[:8]}".lower(),
      #                  frozen=True,
      #                  description="Generated Automatically by default_factory") #User can Read only
 
-     iam_groups: Set[str] = Field(default_factory=lambda:{"pulseroot__authuser_open"}, description="User's Groups, with a default one for all authenticated Pulse users") #User can Read only
-     sbscrptn_plan: str=Field(default_factory=lambda:"subscription_free", description="Subscription Plan ") #User can Read only
-     sbscrptn_status: str=Field(default_factory=lambda:"active", description="Subscription Status") #User can Read only
+     iam_groups: Dict[str, List[str]] = Field(default_factory=lambda:DEFAULT_IAM_GROUPS, description="User's Groups, with a default one for all authenticated Pulse users") #User can Read only
+     sbscrptn_plan: str=Field(default_factory=lambda:DEFAULT_SUBSCRIPTION_PLAN, description="Subscription Plan ") #User can Read only
+     sbscrptn_status: str=Field(default_factory=lambda:DEFAULT_SUBSCRIPTION_STATUS, description="Subscription Status") #User can Read only
      sbscrptn_start_date: datetime=Field(default_factory=lambda:datetime.utcnow(), description="Subscription Start Date") #User can Read only
      sbscrptn_end_date: datetime=Field(default_factory=lambda:datetime.utcnow()+relativedelta(years=1) , description="Subscription End Date") #User can Read only
-     sbscrptn_insight_credits: int= Field(default_factory=lambda:7, description="Depends on Subscription Plan, Set Amount udated at Regular Intervals or at Regular Time") #User can Read only
+     sbscrptn_insight_credits: int= Field(default_factory=lambda:DEFAULT_SUBSCRIPTION_INSIGHT_CREDITS, description="Depends on Subscription Plan, Set Amount udated at Regular Intervals or at Regular Time") #User can Read only
      sbscrptn_ins_crdts_updtd_since_datetime: datetime=Field(default_factory=lambda:datetime.utcnow(), description="Subscription Start Date") #User can Read only #User can Read only #User can Read only
-     extra_insigth_credits: int= Field(default_factory=lambda:0, description="If user purchased extra Insigth Credits they shouldn't Expire") #User can Read only
-     payment_ref_uids: Optional[Set[str]] = None #User can Read only
+     extra_insight_credits: int= Field(default_factory=lambda:DEFAULT_EXTRA_INSIGHT_CREDITS, description="If user purchased extra Insigth Credits they shouldn't Expire") #User can Read only
+     payment_refs_uids: Optional[Set[str]] = None #User can Read only
 
      creat_date: datetime #User can Read only
      creat_by_user: str #User can Read only / Ideally shouldn't be able to see even
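The shape of iam_groups changes from a flat Set[str] of group names to a Dict[str, List[str]] mapping each group to its permission list, so consumers that iterated the old set need updating. A rough before/after sketch (group and permission names are taken from the defaults shown above):

    # 2.6: iam_groups was a set of group names
    old_groups = {"pulseroot__authuser_open"}
    for group in old_groups:
        print(group)

    # 2.7.1: iam_groups maps group -> list of permissions (DEFAULT_IAM_GROUPS above)
    new_groups = {"pulseroot": ["full_open_read"]}
    for group, permissions in new_groups.items():
        print(group, permissions)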
ipulse_shared_core_ftredge/utils/__init__.py
@@ -0,0 +1,23 @@
+ # pylint: disable=missing-module-docstring
+
+ from .logs import (ContextLog, get_logger)
+ from .utils_common import (save_json_locally_extended,
+                            log_error,
+                            log_warning,
+                            log_info,
+                            prepare_full_file_path)
+
+ from .utils_collector_pipelinemon import ( Pipelinemon)
+
+ from .utils_cloud_gcp import (add_gcp_cloud_logging,
+                               add_gcp_error_reporting,
+                               create_bigquery_schema_from_json,
+                               read_csv_from_gcs, read_json_from_gcs,
+                               write_csv_to_gcs,write_json_to_gcs_extended)
+
+
+ from .utils_cloud import (write_json_to_cloud_storage_extended,
+                           read_json_from_cloud_storage)
+
+
+ from .utils_templates_and_schemas import (check_format_against_schema_template)
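With these re-exports, downstream code can pull the new helpers straight from the utils package in one line, for example:

    from ipulse_shared_core_ftredge.utils import (ContextLog, get_logger, Pipelinemon,
                                                  read_json_from_gcs, write_json_to_gcs_extended)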
ipulse_shared_core_ftredge/utils/logs/__init__.py
@@ -0,0 +1,2 @@
+ from .context_log import ContextLog
+ from .get_logger import get_logger
ipulse_shared_core_ftredge/{models → utils/logs}/audit_log_firestore.py
@@ -9,4 +9,4 @@ class AuditLogFirestore(BaseModel):
      field_name: str
      old_value: str
      new_value: str
-     timestamp: datetime.utcnow()
+     timestamp: datetime
ipulse_shared_core_ftredge/utils/logs/context_log.py
@@ -0,0 +1,210 @@
+
+ # pylint: disable=missing-module-docstring
+ # pylint: disable=missing-function-docstring
+ # pylint: disable=logging-fstring-interpolation
+ # pylint: disable=line-too-long
+ # pylint: disable=missing-class-docstring
+ # pylint: disable=broad-exception-caught
+ # pylint: disable=unused-variable
+ import traceback
+ import json
+ from datetime import datetime, timezone
+ from typing import List
+ from ipulse_shared_core_ftredge import Status, LogLevel
+
+ ############################################################################
+ ##################### SETTING UP custom LOGGING format= DICT ##########################
+ ### Cloud Agnostic, can be used with any cloud provider , jsut use to_dict() method to get the log in dict format
+ class ContextLog:
+
+     def __init__(self, level: LogLevel, base_context: str = None, collector_id: str = None,
+                  context: str = None, description: str = None,
+                  e: Exception = None, e_type: str = None, e_message: str = None, e_traceback: str = None,
+                  log_status: Status = Status.OPEN, subject: str = None, systems_impacted: List[str] = None,
+                  ):
+
+         if e is not None:
+             e_type = type(e).__name__ if e_type is None else e_type
+             e_message = str(e) if e_message is None else e_message
+             e_traceback = traceback.format_exc() if e_traceback is None else e_traceback
+         elif (e_traceback is None or e_traceback== "") and (e_type or e_message):
+             e_traceback = traceback.format_exc()
+
+         self.level = level
+         self.subject = subject
+         self.description = description
+         self._base_context = base_context
+         self._context = context
+         self._systems_impacted = systems_impacted if systems_impacted else []
+         self.collector_id = collector_id
+         self.exception_type = e_type
+         self.exception_message = e_message
+         self.exception_traceback = e_traceback
+         self.log_status = log_status
+         self.timestamp = datetime.now(timezone.utc).isoformat()
+
+     @property
+     def base_context(self):
+         return self._base_context
+
+     @base_context.setter
+     def base_context(self, value):
+         self._base_context = value
+
+     @property
+     def context(self):
+         return self._context
+
+     @context.setter
+     def context(self, value):
+         self._context = value
+
+     @property
+     def systems_impacted(self):
+         return self._systems_impacted
+
+     @systems_impacted.setter
+     def systems_impacted(self, list_of_si: List[str]):
+         self._systems_impacted = list_of_si
+
+     def add_system_impacted(self, system_impacted: str):
+         if self._systems_impacted is None:
+             self._systems_impacted = []
+         self._systems_impacted.append(system_impacted)
+
+     def remove_system_impacted(self, system_impacted: str):
+         if self._systems_impacted is not None:
+             self._systems_impacted.remove(system_impacted)
+
+     def clear_systems_impacted(self):
+         self._systems_impacted = []
+
+     def _format_traceback(self, e_traceback, e_message, max_field_len:int, max_traceback_lines:int):
+         if not e_traceback or e_traceback == 'None\n':
+             return None
+
+         traceback_lines = e_traceback.splitlines()
+
+         # Check if the traceback is within the limits
+         if len(traceback_lines) <= max_traceback_lines and len(e_traceback) <= max_field_len:
+             return e_traceback
+
+         # Remove lines that are part of the exception message if they are present in traceback
+         message_lines = e_message.splitlines() if e_message else []
+         if message_lines:
+             for message_line in message_lines:
+                 if message_line in traceback_lines:
+                     traceback_lines.remove(message_line)
+
+         # Filter out lines from third-party libraries (like site-packages)
+         filtered_lines = [line for line in traceback_lines if "site-packages" not in line]
+
+         # If filtering results in too few lines, revert to original traceback
+         if len(filtered_lines) < 2:
+             filtered_lines = traceback_lines
+
+         # Combine standalone bracket lines with previous or next lines
+         combined_lines = []
+         for line in filtered_lines:
+             if line.strip() in {"(", ")", "{", "}", "[", "]"} and combined_lines:
+                 combined_lines[-1] += " " + line.strip()
+             else:
+                 combined_lines.append(line)
+
+         # Ensure the number of lines doesn't exceed MAX_TRACEBACK_LINES
+         if len(combined_lines) > max_traceback_lines:
+             keep_lines_start = min(max_traceback_lines // 2, len(combined_lines))
+             keep_lines_end = min(max_traceback_lines // 2, len(combined_lines) - keep_lines_start)
+             combined_lines = (
+                 combined_lines[:keep_lines_start] +
+                 ['... (truncated) ...'] +
+                 combined_lines[-keep_lines_end:]
+             )
+
+         formatted_traceback = '\n'.join(combined_lines)
+
+         # Ensure the total length doesn't exceed MAX_TRACEBACK_LENGTH
+         if len(formatted_traceback) > max_field_len:
+             truncated_length = max_field_len - len('... (truncated) ...')
+             half_truncated_length = truncated_length // 2
+             formatted_traceback = (
+                 formatted_traceback[:half_truncated_length] +
+                 '\n... (truncated) ...\n' +
+                 formatted_traceback[-half_truncated_length:]
+             )
+         return formatted_traceback
+
+     def to_dict(self, max_field_len:int =10000, size_limit:float=256 * 1024 * 0.80,max_traceback_lines:int = 30):
+         size_limit = int(size_limit) # Ensure size_limit is an integer
+
+         # Unified list of all fields
+         systems_impacted_str = f"{len(self.systems_impacted)} system(s): " + " ,,, ".join(self.systems_impacted) if self.systems_impacted else None
+         fields = [
+             ("log_status", str(self.log_status.name)),
+             ("level_code", self.level.value),
+             ("level_name", str(self.level.name)),
+             ("base_context", str(self.base_context)),
+             ("timestamp", str(self.timestamp)),
+             ("collector_id", str(self.collector_id)),
+             ("systems_impacted", systems_impacted_str),
+             ("context", str(self.context)), # special sizing rules apply to it
+             ("subject", str(self.subject)),
+             ("description", str(self.description)),
+             ("exception_type", str(self.exception_type)),
+             ("exception_message", str(self.exception_message)),
+             ("exception_traceback", str(self._format_traceback(self.exception_traceback,self.exception_message, max_field_len, max_traceback_lines)))
+         ]
+
+         # Function to calculate the byte size of a JSON-encoded field
+         def field_size(key, value):
+             return len(json.dumps({key: value}).encode('utf-8'))
+
+         # Function to truncate a value based on its type
+         # Function to truncate a value based on its type
+         def truncate_value(value, max_size):
+             if isinstance(value, str):
+                 half_size = max_size // 2
+                 return value[:half_size] + '...' + value[-(max_size - half_size - 3):]
+             return value
+
+         # Ensure no field exceeds max_field_len
+         for i, (key, value) in enumerate(fields):
+             if isinstance(value, str) and len(value) > max_field_len:
+                 fields[i] = (key, truncate_value(value, max_field_len))
+
+         # Ensure total size of the dict doesn't exceed size_limit
+         total_size = sum(field_size(key, value) for key, value in fields)
+         log_dict = {}
+         truncated = False
+
+         if total_size > size_limit:
+             truncated = True
+             remaining_size = size_limit
+             remaining_fields = len(fields)
+
+             for key, value in fields:
+                 if remaining_fields > 0:
+                     max_size_per_field = remaining_size // remaining_fields
+                 else:
+                     max_size_per_field = 0
+
+                 field_sz = field_size(key, value)
+                 if field_sz > max_size_per_field:
+                     value = truncate_value(value, max_size_per_field)
+                     field_sz = field_size(key, value)
+
+                 log_dict[key] = value
+                 remaining_size -= field_sz
+                 remaining_fields -= 1
+         else:
+             log_dict = dict(fields)
+
+         log_dict['trunc'] = truncated
+
+         return log_dict
+
+     def __str__(self):
+         return json.dumps(self.to_dict(), indent=4)
+
+     def __repr__(self):
+         return self.__str__()
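A sketch of how ContextLog is likely meant to be used: wrap a caught exception, tag the systems it touched, and emit the size-capped dict. Only Status.OPEN and the constructor signature are confirmed by the diff; the LogLevel member and the label strings below are assumptions:

    from ipulse_shared_core_ftredge import LogLevel
    from ipulse_shared_core_ftredge.utils import ContextLog

    try:
        1 / 0
    except ZeroDivisionError as exc:
        log = ContextLog(level=LogLevel.ERROR,           # assumed member name
                         base_context="nightly-import",  # illustrative labels
                         context="load_prices",
                         description="ratio computation failed",
                         e=exc)
        log.add_system_impacted("gcs bucket: prices-archive")  # hypothetical system name
        # Dict form, with fields truncated to stay within ~80% of a 256 KB log entry.
        print(log.to_dict())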
ipulse_shared_core_ftredge/utils/logs/get_logger.py
@@ -0,0 +1,103 @@
+ # pylint: disable=missing-module-docstring
+ # pylint: disable=missing-function-docstring
+ # pylint: disable=missing-class-docstring
+ # pylint: disable=broad-exception-caught
+ # pylint: disable=line-too-long
+ # pylint: disable=unused-variable
+ # pylint: disable=broad-exception-raised
+ import logging
+ import os
+ import json
+ import traceback
+ from typing import List, Union
+ from ipulse_shared_core_ftredge.enums import LoggingHandlers
+ from ipulse_shared_core_ftredge.utils.utils_cloud_gcp import add_gcp_cloud_logging, add_gcp_error_reporting
+
+ ###################################################################################################
+ ##################################################################################################
+ ##################################### SETTING UP LOGGER ##########################################
+
+ class CloudLogFormatter(logging.Formatter):
+     """Formats log records as structured JSON."""
+
+     def format(self, record):
+         log_entry = {
+             'message': record.msg,
+             'timestamp': self.formatTime(record, self.datefmt),
+             'name': record.name,
+             'severity': record.levelname,
+             'pathname': record.pathname,
+             'lineno': record.lineno,
+         }
+         if record.exc_info:
+             log_entry['exception_traceback'] = ''.join(traceback.format_exception(*record.exc_info))
+         if isinstance(record.msg, dict):
+             log_entry.update(record.msg)
+         return json.dumps(log_entry)
+
+
+ class LocalLogFormatter(logging.Formatter):
+     """Formats log records for local output to the console."""
+
+     def format(self, record): # Make sure you have the 'record' argument here!
+         path_parts = record.pathname.split(os.sep)
+
+         # Get the last two parts of the path if they exist
+         if len(path_parts) >= 2:
+             short_path = os.path.join(path_parts[-2], path_parts[-1])
+         else:
+             short_path = record.pathname
+
+         # Format log messages differently based on the log level
+         if record.levelno == logging.INFO:
+             log_message = f"[INFO] {self.formatTime(record, self.datefmt)} :: {record.msg}"
+         elif record.levelno == logging.DEBUG:
+             log_message = f"[DEBUG] {self.formatTime(record, self.datefmt)} :: {record.msg} :: {short_path} :: lineno {record.lineno} :: {record.name}"
+         elif record.levelno == logging.ERROR:
+             log_message = f"[ERROR] {self.formatTime(record, self.datefmt)} :: {record.msg} :: {short_path} :: lineno {record.lineno} :: {record.name}"
+             if record.exc_info:
+                 log_message += "\n" + ''.join(traceback.format_exception(*record.exc_info))
+         else:
+             log_message = f"[{record.levelname}] {self.formatTime(record, self.datefmt)} :: {record.msg} :: {short_path} :: lineno {record.lineno} :: {record.name}"
+
+
+         return log_message
+
+
+ def get_logger( logger_name:str ,level=logging.INFO, logging_handler_providers: Union[LoggingHandlers, List[LoggingHandlers]] = LoggingHandlers.NONE):
+
+     """Creates and configures a logger with the specified handlers."""
+
+     logger = logging.getLogger(logger_name)
+     logger.setLevel(level)
+     cloud_formatter = CloudLogFormatter()
+
+     # Ensure logging_handler_providers is a list for consistent processing
+     if not isinstance(logging_handler_providers, list):
+         logging_handler_providers = [logging_handler_providers]
+
+     supported_remote_handlers = [
+         LoggingHandlers.GCP_CLOUD_LOGGING,
+         LoggingHandlers.GCP_ERROR_REPORTING,
+         LoggingHandlers.LOCAL_STREAM,
+         LoggingHandlers.NONE, # If NONE is considered a remote handler
+     ]
+
+     # Remote handlers
+
+     for handler_provider in logging_handler_providers:
+         if handler_provider in supported_remote_handlers:
+             if handler_provider == LoggingHandlers.GCP_CLOUD_LOGGING:
+                 add_gcp_cloud_logging(logger, cloud_formatter)
+             elif handler_provider == LoggingHandlers.GCP_ERROR_REPORTING:
+                 add_gcp_error_reporting(logger)
+             elif handler_provider == LoggingHandlers.LOCAL_STREAM: # Handle local stream
+                 local_handler = logging.StreamHandler()
+                 local_handler.setFormatter(LocalLogFormatter())
+                 logger.addHandler(local_handler)
+         else:
+             raise ValueError(
+                 f"Unsupported logging provider: {handler_provider}. "
+                 f"Supported providers: {[h.value for h in supported_remote_handlers]}"
+             )
+     return logger
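A minimal wiring example for the new logger factory: LOCAL_STREAM writes to the console via LocalLogFormatter, while the GCP handlers additionally require google-cloud-logging / error-reporting credentials. The logger name and messages below are illustrative:

    import logging
    from ipulse_shared_core_ftredge.enums import LoggingHandlers
    from ipulse_shared_core_ftredge.utils import get_logger

    logger = get_logger("ingestion-job",
                        level=logging.DEBUG,
                        logging_handler_providers=LoggingHandlers.LOCAL_STREAM)

    logger.info("pipeline started")
    logger.debug("loaded 1200 rows from staging")  # row count is illustrative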
ipulse_shared_core_ftredge/utils/utils_cloud.py
@@ -0,0 +1,53 @@
+ # pylint: disable=missing-module-docstring
+ # pylint: disable=missing-function-docstring
+ # pylint: disable=missing-class-docstring
+ # pylint: disable=broad-exception-caught
+ # pylint: disable=line-too-long
+ # pylint: disable=unused-variable
+ # pylint: disable=broad-exception-caught
+ from ipulse_shared_core_ftredge.enums import CloudProvider, DataSourceType, DuplicationHandling, MatchConditionType
+ from .utils_collector_pipelinemon import Pipelinemon
+ from .utils_cloud_gcp import (write_json_to_gcs_extended,
+                               read_json_from_gcs)
+
+ #######################################################################################################################
+ #######################################################################################################################
+ ################################################# cloud IO functions ########################################
+
+ # Define the central function that routes to the relevant cloud-specific function
+ def write_json_to_cloud_storage_extended(cloud_storage:CloudProvider | DataSourceType, storage_client, data:dict | list | str, bucket_name: str, file_name: str,
+                                          duplication_handling:DuplicationHandling, duplication_match_condition_type: MatchConditionType, duplication_match_condition: str = "",
+                                          max_retries:int=2, max_matched_deletable_files:int=1,
+                                          pipelinemon: Pipelinemon = None, logger=None, print_out=False, raise_e=False):
+
+
+     supported_cloud_storage_values = [CloudProvider.GCP, DataSourceType.GCS]
+
+     if cloud_storage in [CloudProvider.GCP, DataSourceType.GCS]:
+         return write_json_to_gcs_extended(
+             pipelinemon=pipelinemon,
+             storage_client=storage_client,
+             data=data,
+             bucket_name=bucket_name,
+             file_name=file_name,
+             duplication_handling_enum=duplication_handling,
+             duplication_match_condition_type_enum=duplication_match_condition_type,
+             duplication_match_condition=duplication_match_condition,
+             max_retries=max_retries,
+             max_deletable_files=max_matched_deletable_files,
+             logger=logger,
+             print_out=print_out,
+             raise_e=raise_e
+         )
+
+     raise ValueError(f"Unsupported cloud storage : {cloud_storage}. Supported cloud storage values: {supported_cloud_storage_values}")
+
+
+ def read_json_from_cloud_storage(cloud_storage:CloudProvider | DataSourceType , storage_client, bucket_name:str, file_name:str, logger=None, print_out:bool=False):
+
+     supported_cloud_storage_values = [CloudProvider.GCP, DataSourceType.GCS]
+
+     if cloud_storage in [CloudProvider.GCP, DataSourceType.GCS]:
+         return read_json_from_gcs(storage_client=storage_client, bucket_name=bucket_name, file_name=file_name, logger=logger, print_out=print_out)
+
+     raise ValueError(f"Unsupported cloud storage: {cloud_storage}. Supported cloud storage values: {supported_cloud_storage_values}")
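A hedged sketch of the router in use with a GCS client. Only CloudProvider.GCP and DataSourceType.GCS are confirmed above; the DuplicationHandling and MatchConditionType member names below are assumptions, since those enums are not part of this diff:

    from google.cloud import storage
    from ipulse_shared_core_ftredge.enums import (CloudProvider, DuplicationHandling,
                                                  MatchConditionType)
    from ipulse_shared_core_ftredge.utils import (write_json_to_cloud_storage_extended,
                                                  read_json_from_cloud_storage)

    client = storage.Client()

    write_json_to_cloud_storage_extended(
        cloud_storage=CloudProvider.GCP,
        storage_client=client,
        data={"symbol": "ABC", "close": 101.5},                       # illustrative payload
        bucket_name="example-bucket",
        file_name="prices/2024-07-01.json",
        duplication_handling=DuplicationHandling.OVERWRITE,           # assumed member name
        duplication_match_condition_type=MatchConditionType.PREFIX,   # assumed member name
    )

    doc = read_json_from_cloud_storage(CloudProvider.GCP, client,
                                       "example-bucket", "prices/2024-07-01.json")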