nui-lambda-shared-utils 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,177 @@
1
+ """
2
+ Enterprise-grade utilities for AWS Lambda functions with Slack, Elasticsearch, and monitoring integrations.
3
+ """
4
+
5
+ # Configuration system
6
+ from .config import (
7
+ Config,
8
+ get_config,
9
+ set_config,
10
+ configure,
11
+ get_es_host,
12
+ get_es_credentials_secret,
13
+ get_db_credentials_secret,
14
+ get_slack_credentials_secret,
15
+ )
16
+
17
+ # Core utilities
18
+ from .secrets_helper import (
19
+ get_secret,
20
+ get_database_credentials,
21
+ get_elasticsearch_credentials,
22
+ get_slack_credentials,
23
+ get_api_key,
24
+ clear_cache,
25
+ )
26
+
27
+ # Optional imports - only fail if actually used
28
+ try:
29
+ from .slack_client import SlackClient
30
+ except ImportError:
31
+ SlackClient = None
32
+
33
+ try:
34
+ from .es_client import ElasticsearchClient
35
+ except ImportError:
36
+ ElasticsearchClient = None
37
+
38
+ try:
39
+ from .db_client import DatabaseClient, get_pool_stats
40
+ except ImportError:
41
+ DatabaseClient = None
42
+ get_pool_stats = None
43
+
44
+ from .timezone import nz_time, format_nz_time
45
+
46
+ # Slack formatting utilities (no external dependencies)
47
+ from .slack_formatter import (
48
+ SlackBlockBuilder,
49
+ format_currency,
50
+ format_percentage,
51
+ format_number,
52
+ format_nz_time as format_nz_time_slack,
53
+ format_date_range,
54
+ format_daily_header,
55
+ format_weekly_header,
56
+ format_error_alert,
57
+ SERVICE_EMOJI,
58
+ SEVERITY_EMOJI,
59
+ STATUS_EMOJI,
60
+ )
61
+
62
+ # ES query builder - optional import
63
+ try:
64
+ from .es_query_builder import (
65
+ ESQueryBuilder,
66
+ build_error_rate_query,
67
+ build_top_errors_query,
68
+ build_response_time_query,
69
+ build_service_volume_query,
70
+ build_user_activity_query,
71
+ build_pattern_detection_query,
72
+ build_tender_participant_query,
73
+ )
74
+ except ImportError:
75
+ ESQueryBuilder = None
76
+ build_error_rate_query = None
77
+ build_top_errors_query = None
78
+ build_response_time_query = None
79
+ build_service_volume_query = None
80
+ build_user_activity_query = None
81
+ build_pattern_detection_query = None
82
+ build_tender_participant_query = None
83
+ from .error_handler import (
84
+ RetryableError,
85
+ NonRetryableError,
86
+ ErrorPatternMatcher,
87
+ ErrorAggregator,
88
+ with_retry,
89
+ retry_on_network_error,
90
+ retry_on_db_error,
91
+ retry_on_es_error,
92
+ handle_lambda_error,
93
+ categorize_retryable_error,
94
+ )
95
+ from .cloudwatch_metrics import (
96
+ MetricsPublisher,
97
+ MetricAggregator,
98
+ StandardMetrics,
99
+ TimedMetric,
100
+ track_lambda_performance,
101
+ create_service_dimensions,
102
+ publish_health_metric,
103
+ )
104
+
105
+
106
+ # Slack setup utilities (for CLI usage) - optional import
107
+ try:
108
+ from . import slack_setup
109
+ except ImportError:
110
+ slack_setup = None
111
+
112
+ __all__ = [
113
+ # Configuration system
114
+ "Config",
115
+ "get_config",
116
+ "set_config",
117
+ "configure",
118
+ "get_es_host",
119
+ "get_es_credentials_secret",
120
+ "get_db_credentials_secret",
121
+ "get_slack_credentials_secret",
122
+ # Core utilities
123
+ "get_secret",
124
+ "get_database_credentials",
125
+ "get_elasticsearch_credentials",
126
+ "get_slack_credentials",
127
+ "get_api_key",
128
+ "clear_cache",
129
+ "SlackClient",
130
+ "ElasticsearchClient",
131
+ "DatabaseClient",
132
+ "get_pool_stats",
133
+ "nz_time",
134
+ "format_nz_time",
135
+ "slack_setup",
136
+ # Slack formatting
137
+ "SlackBlockBuilder",
138
+ "format_currency",
139
+ "format_percentage",
140
+ "format_number",
141
+ "format_nz_time_slack",
142
+ "format_date_range",
143
+ "format_daily_header",
144
+ "format_weekly_header",
145
+ "format_error_alert",
146
+ "SERVICE_EMOJI",
147
+ "SEVERITY_EMOJI",
148
+ "STATUS_EMOJI",
149
+ # ES query building
150
+ "ESQueryBuilder",
151
+ "build_error_rate_query",
152
+ "build_top_errors_query",
153
+ "build_response_time_query",
154
+ "build_service_volume_query",
155
+ "build_user_activity_query",
156
+ "build_pattern_detection_query",
157
+ "build_tender_participant_query",
158
+ # Error handling
159
+ "RetryableError",
160
+ "NonRetryableError",
161
+ "ErrorPatternMatcher",
162
+ "ErrorAggregator",
163
+ "with_retry",
164
+ "retry_on_network_error",
165
+ "retry_on_db_error",
166
+ "retry_on_es_error",
167
+ "handle_lambda_error",
168
+ "categorize_retryable_error",
169
+ # CloudWatch metrics
170
+ "MetricsPublisher",
171
+ "MetricAggregator",
172
+ "StandardMetrics",
173
+ "TimedMetric",
174
+ "track_lambda_performance",
175
+ "create_service_dimensions",
176
+ "publish_health_metric",
177
+ ]
@@ -0,0 +1,367 @@
1
+ """
2
+ CloudWatch metrics publishing utilities for Lambda services.
3
+ Provides efficient batching and standardized metric publishing patterns.
4
+ """
5
+
6
import functools
import logging
import os
import time
from collections import defaultdict
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional, Union

import boto3
from botocore.exceptions import ClientError
14
+
15
+ log = logging.getLogger(__name__)
16
+
17
+
18
class MetricsPublisher:
    """
    Efficient CloudWatch metrics publisher with batching support.

    Metrics are buffered locally and sent via ``put_metric_data`` in batches
    of up to 20 (the CloudWatch per-request limit), either automatically once
    ``auto_flush_size`` is reached or explicitly via :meth:`flush`.

    Example:
        publisher = MetricsPublisher('Application')
        publisher.put_metric('RecordsProcessed', 150, unit='Count')
        publisher.put_metric('ResponseTime', 245.5, unit='Milliseconds')
        publisher.flush()  # Send all metrics
    """

    # CloudWatch hard limit on metric datums per PutMetricData request.
    _MAX_BATCH_SIZE = 20

    def __init__(
        self,
        namespace: str,
        dimensions: Optional[Dict[str, str]] = None,
        auto_flush_size: int = 20,
        region: Optional[str] = None,
    ):
        """
        Initialize metrics publisher.

        Args:
            namespace: CloudWatch namespace for metrics
            dimensions: Default dimensions to apply to all metrics
            auto_flush_size: Automatically flush when batch reaches this size
            region: AWS region (uses default if not specified)
        """
        self.namespace = namespace
        self.default_dimensions = dimensions or {}
        self.auto_flush_size = auto_flush_size
        self.client = boto3.client("cloudwatch", region_name=region)
        self.metric_buffer: List[Dict] = []

    def _append_metric(self, metric_data: Dict, dimensions: Optional[Dict[str, str]]) -> None:
        """Attach merged dimensions to metric_data, buffer it, and auto-flush when full."""
        # Per-call dimensions override the publisher-wide defaults.
        all_dimensions = {**self.default_dimensions, **(dimensions or {})}
        if all_dimensions:
            metric_data["Dimensions"] = [{"Name": k, "Value": str(v)} for k, v in all_dimensions.items()]

        self.metric_buffer.append(metric_data)
        if len(self.metric_buffer) >= self.auto_flush_size:
            self.flush()

    def put_metric(
        self,
        metric_name: str,
        value: Union[int, float],
        unit: str = "None",
        timestamp: Optional[datetime] = None,
        dimensions: Optional[Dict[str, str]] = None,
        storage_resolution: int = 60,
    ) -> None:
        """
        Add a single-value metric to the buffer.

        Args:
            metric_name: Name of the metric
            value: Metric value
            unit: CloudWatch unit (Count, Milliseconds, Bytes, etc.)
            timestamp: Metric timestamp (defaults to now, UTC)
            dimensions: Additional dimensions for this metric
            storage_resolution: 1 for high-resolution, 60 for standard
        """
        self._append_metric(
            {
                "MetricName": metric_name,
                "Value": float(value),
                "Unit": unit,
                # Timezone-aware UTC; datetime.utcnow() is naive and deprecated.
                "Timestamp": timestamp or datetime.now(timezone.utc),
                "StorageResolution": storage_resolution,
            },
            dimensions,
        )

    def put_metric_with_statistics(
        self,
        metric_name: str,
        values: List[Union[int, float]],
        unit: str = "None",
        timestamp: Optional[datetime] = None,
        dimensions: Optional[Dict[str, str]] = None,
    ) -> None:
        """
        Add a metric carrying pre-aggregated statistics (count/sum/min/max).

        No-op when ``values`` is empty.

        Args:
            metric_name: Name of the metric
            values: List of values to calculate statistics from
            unit: CloudWatch unit
            timestamp: Metric timestamp (defaults to now, UTC)
            dimensions: Additional dimensions
        """
        if not values:
            return

        self._append_metric(
            {
                "MetricName": metric_name,
                "StatisticValues": {
                    "SampleCount": len(values),
                    "Sum": sum(values),
                    "Minimum": min(values),
                    "Maximum": max(values),
                },
                "Unit": unit,
                "Timestamp": timestamp or datetime.now(timezone.utc),
            },
            dimensions,
        )

    def flush(self) -> bool:
        """
        Send all buffered metrics to CloudWatch.

        On a partial failure, batches that were already accepted are dropped
        from the buffer so that a later retry does not publish duplicates.

        Returns:
            bool: True if all metrics were sent, False otherwise
        """
        if not self.metric_buffer:
            return True

        sent = 0
        try:
            # CloudWatch allows max 20 metrics per request.
            for i in range(0, len(self.metric_buffer), self._MAX_BATCH_SIZE):
                batch = self.metric_buffer[i : i + self._MAX_BATCH_SIZE]
                self.client.put_metric_data(Namespace=self.namespace, MetricData=batch)
                sent += len(batch)

            log.info(f"Published {sent} metrics to {self.namespace}")
            self.metric_buffer = []
            return True

        except ClientError as e:
            log.error(f"Failed to publish metrics: {e}")
            # Keep only the unsent tail for a future flush attempt.
            self.metric_buffer = self.metric_buffer[sent:]
            return False

    def __enter__(self):
        """Context manager support."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Ensure metrics are flushed on exit."""
        self.flush()
172
+
173
+
174
class MetricAggregator:
    """
    Accumulate high-frequency metric samples in memory and publish them in
    aggregate form (statistic sets and counters) via a MetricsPublisher.
    """

    def __init__(self, publisher: MetricsPublisher):
        self.publisher = publisher
        # Raw samples per metric name, aggregated into statistics on publish.
        self.aggregates: Dict[str, List[float]] = defaultdict(list)
        # Running totals per counter name.
        self.counters: Dict[str, float] = defaultdict(float)

    def add_value(self, metric_name: str, value: Union[int, float]) -> None:
        """Record one sample for *metric_name*."""
        self.aggregates[metric_name].append(float(value))

    def increment_counter(self, metric_name: str, value: Union[int, float] = 1) -> None:
        """Bump the running total for *metric_name* by *value* (default 1)."""
        self.counters[metric_name] += value

    def publish_aggregates(self, unit: str = "None", dimensions: Optional[Dict[str, str]] = None) -> None:
        """Publish every non-empty sample series and every counter, then reset."""
        sampled = [(name, samples) for name, samples in self.aggregates.items() if samples]
        for name, samples in sampled:
            self.publisher.put_metric_with_statistics(name, samples, unit=unit, dimensions=dimensions)

        for name, total in self.counters.items():
            self.publisher.put_metric(name, total, unit="Count", dimensions=dimensions)

        # Start fresh for the next aggregation window.
        self.aggregates.clear()
        self.counters.clear()
207
+
208
+
209
# Standard metric names for consistency across services
class StandardMetrics:
    """Standard metric names used across application services.

    Use these constants instead of ad-hoc strings so that dashboards and
    alarms can rely on a single spelling of each metric name.
    """

    # Service health metrics
    SERVICE_HEALTH = "ServiceHealth"
    ERROR_RATE = "ErrorRate"
    RESPONSE_TIME = "ResponseTime"
    REQUEST_COUNT = "RequestCount"

    # Business metrics
    RECORDS_CREATED = "RecordsCreated"
    RECORDS_PROCESSED = "RecordsProcessed"
    USERS_ACTIVE = "ActiveUsers"
    REVENUE_PROCESSED = "RevenueProcessed"

    # Lambda metrics
    LAMBDA_DURATION = "LambdaDuration"
    LAMBDA_ERRORS = "LambdaErrors"
    LAMBDA_THROTTLES = "LambdaThrottles"
    LAMBDA_COLD_STARTS = "LambdaColdStarts"

    # Database metrics
    DB_QUERY_TIME = "DatabaseQueryTime"
    DB_CONNECTION_ERRORS = "DatabaseConnectionErrors"
    DB_ACTIVE_CONNECTIONS = "DatabaseActiveConnections"

    # Elasticsearch metrics
    ES_QUERY_TIME = "ElasticsearchQueryTime"
    ES_QUERY_ERRORS = "ElasticsearchQueryErrors"
    ES_DOCUMENT_COUNT = "ElasticsearchDocumentCount"

    # External API metrics
    API_CALL_DURATION = "ExternalAPICallDuration"
    API_CALL_ERRORS = "ExternalAPICallErrors"
    API_RATE_LIMIT_HITS = "APIRateLimitHits"
245
+
246
+
247
def track_lambda_performance(namespace: str = "Application"):
    """
    Decorator to automatically track Lambda function performance.

    Publishes cold-start, duration, request-count and error metrics under
    *namespace*, dimensioned by function name and the STAGE environment.

    Example:
        @track_lambda_performance()
        def handler(event, context):
            # Your Lambda logic
            return response
    """

    def decorator(func):
        @functools.wraps(func)  # preserve the handler's name/docstring
        def wrapper(event, context):
            start_time = time.time()
            # Cold-start detection: the Lambda context object is recreated on
            # every invocation, so a flag stored on it never survives. A flag
            # on the wrapper (which lives for the container's lifetime) does.
            cold_start = not getattr(wrapper, "_is_warm", False)
            wrapper._is_warm = True

            publisher = MetricsPublisher(
                namespace,
                dimensions={"FunctionName": context.function_name, "Environment": os.environ.get("STAGE", "dev")},
            )

            try:
                # Kept for backward compatibility with callers inspecting it.
                context.is_warm = True

                if cold_start:
                    publisher.put_metric(StandardMetrics.LAMBDA_COLD_STARTS, 1, unit="Count")

                # Execute the wrapped handler.
                result = func(event, context)

                # Track success metrics.
                duration = (time.time() - start_time) * 1000  # seconds -> milliseconds
                publisher.put_metric(StandardMetrics.LAMBDA_DURATION, duration, unit="Milliseconds")
                publisher.put_metric(StandardMetrics.REQUEST_COUNT, 1, unit="Count")

                return result

            except Exception:
                # Count the failure, then let the exception propagate to Lambda.
                publisher.put_metric(StandardMetrics.LAMBDA_ERRORS, 1, unit="Count")
                raise

            finally:
                # Ensure buffered metrics are published even on failure.
                publisher.flush()

        return wrapper

    return decorator
298
+
299
+
300
def create_service_dimensions(service_name: str, environment: Optional[str] = None) -> Dict[str, str]:
    """
    Build the standard dimension set for service metrics.

    Args:
        service_name: Name of the service
        environment: Environment (defaults to the STAGE env var, then "dev")

    Returns:
        Dict of dimensions: ServiceName, Environment, and Region when known
    """
    resolved_env = environment if environment else os.environ.get("STAGE", "dev")
    dims: Dict[str, str] = {"ServiceName": service_name, "Environment": resolved_env}

    # Region is optional — only present when the runtime exposes it.
    aws_region = os.environ.get("AWS_REGION", os.environ.get("AWS_DEFAULT_REGION"))
    if aws_region:
        dims["Region"] = aws_region

    return dims
319
+
320
+
321
def publish_health_metric(service_name: str, is_healthy: bool, namespace: str = "Application") -> None:
    """
    Publish a single 0/1 health gauge for a service and flush immediately.

    Args:
        service_name: Name of the service
        is_healthy: Whether the service is healthy (1 = healthy, 0 = not)
        namespace: CloudWatch namespace
    """
    health_value = 1 if is_healthy else 0
    publisher = MetricsPublisher(namespace, dimensions=create_service_dimensions(service_name))
    publisher.put_metric(StandardMetrics.SERVICE_HEALTH, health_value, unit="None")
    publisher.flush()
335
+
336
+
337
class TimedMetric:
    """
    Context manager that times its body and publishes the elapsed wall-clock
    time as a metric on exit.

    Example:
        publisher = MetricsPublisher('MyApp')
        with TimedMetric(publisher, 'DatabaseQuery', unit='Milliseconds'):
            # Perform database query
            results = db.query("SELECT * FROM records")
    """

    def __init__(
        self,
        publisher: MetricsPublisher,
        metric_name: str,
        unit: str = "Milliseconds",
        dimensions: Optional[Dict[str, str]] = None,
    ):
        self.publisher = publisher
        self.metric_name = metric_name
        self.unit = unit
        self.dimensions = dimensions
        # Set on __enter__; None until the timer starts.
        self.start_time = None

    def __enter__(self):
        self.start_time = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Published even when the body raised, so failures are timed too.
        elapsed_ms = (time.time() - self.start_time) * 1000  # seconds -> ms
        self.publisher.put_metric(self.metric_name, elapsed_ms, unit=self.unit, dimensions=self.dimensions)
@@ -0,0 +1,127 @@
1
+ """
2
+ Configuration system for AWS Lambda shared utilities.
3
+
4
+ This module provides configurable defaults and environment-based overrides
5
+ to make the library suitable for different deployment environments.
6
+ """
7
+
8
+ import os
9
+ from typing import Dict, Optional, Any
10
+
11
+
12
class Config:
    """Configuration class for AWS Lambda shared utilities with environment-based overrides."""

    def __init__(
        self,
        # Elasticsearch configuration
        es_host: Optional[str] = None,
        es_credentials_secret: Optional[str] = None,
        # Database configuration
        db_credentials_secret: Optional[str] = None,
        # Slack configuration
        slack_credentials_secret: Optional[str] = None,
        # AWS configuration
        aws_region: Optional[str] = None,
    ):
        """
        Initialize configuration with optional overrides.

        Precedence for every setting: explicit argument, then environment
        variable(s), then a built-in default.

        Args:
            es_host: Elasticsearch host (default: localhost:9200)
            es_credentials_secret: AWS secret name for ES credentials
            db_credentials_secret: AWS secret name for database credentials
            slack_credentials_secret: AWS secret name for Slack credentials
            aws_region: AWS region for secrets/services
        """
        env = os.environ.get

        def first_truthy(*candidates):
            # Mirrors an `a or b or c` chain: first truthy candidate wins.
            for candidate in candidates:
                if candidate:
                    return candidate
            return None

        # Elasticsearch settings
        self.es_host = first_truthy(es_host, env("ES_HOST"), env("ELASTICSEARCH_HOST"), "localhost:9200")
        self.es_credentials_secret = first_truthy(
            es_credentials_secret,
            env("ES_CREDENTIALS_SECRET"),
            env("ELASTICSEARCH_CREDENTIALS_SECRET"),
            "elasticsearch-credentials",
        )

        # Database settings
        self.db_credentials_secret = first_truthy(
            db_credentials_secret,
            env("DB_CREDENTIALS_SECRET"),
            env("DATABASE_CREDENTIALS_SECRET"),
            "database-credentials",
        )

        # Slack settings
        self.slack_credentials_secret = first_truthy(
            slack_credentials_secret, env("SLACK_CREDENTIALS_SECRET"), "slack-credentials"
        )

        # AWS settings
        self.aws_region = first_truthy(aws_region, env("AWS_REGION"), env("AWS_DEFAULT_REGION"), "us-east-1")

    def to_dict(self) -> Dict[str, Any]:
        """Return configuration as dictionary for debugging/logging."""
        return {
            "es_host": self.es_host,
            "es_credentials_secret": self.es_credentials_secret,
            "db_credentials_secret": self.db_credentials_secret,
            "slack_credentials_secret": self.slack_credentials_secret,
            "aws_region": self.aws_region,
        }
74
+
75
+
76
+ # Global default configuration instance
77
+ _default_config = None
78
+
79
+
80
def get_config() -> Config:
    """Return the process-wide Config, creating it lazily on first use."""
    global _default_config
    current = _default_config
    if current is None:
        # First access: build a Config from the current environment.
        current = Config()
        _default_config = current
    return current
86
+
87
+
88
def set_config(config: Config) -> None:
    """Install *config* as the process-wide configuration instance."""
    global _default_config
    _default_config = config
92
+
93
+
94
def configure(**kwargs) -> Config:
    """
    Build a Config from keyword arguments and install it globally.

    This is a convenience function equivalent to:
        set_config(Config(**kwargs))

    Returns:
        The new configuration instance
    """
    new_config = Config(**kwargs)
    set_config(new_config)
    return new_config
107
+
108
+
109
+ # Legacy compatibility - environment variable checking functions
110
def get_es_host() -> str:
    """Legacy accessor for the configured Elasticsearch host."""
    cfg = get_config()
    return cfg.es_host
113
+
114
+
115
def get_es_credentials_secret() -> str:
    """Legacy accessor for the Elasticsearch credentials secret name."""
    cfg = get_config()
    return cfg.es_credentials_secret
118
+
119
+
120
def get_db_credentials_secret() -> str:
    """Legacy accessor for the database credentials secret name."""
    cfg = get_config()
    return cfg.db_credentials_secret
123
+
124
+
125
def get_slack_credentials_secret() -> str:
    """Legacy accessor for the Slack credentials secret name."""
    cfg = get_config()
    return cfg.slack_credentials_secret