nui-python-shared-utils 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,225 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ CLI tools for AWS Lambda utilities and Slack workspace automation.
4
+ """
5
+ import os
6
+ import sys
7
+ import logging
8
+ from pathlib import Path
9
+
10
+ import click
11
+
12
+ from .slack_setup import ChannelCreator, load_channel_config
13
+ from .slack_setup.channel_definitions import validate_channel_names, generate_serverless_config
14
+ from .slack_setup.setup_helpers import (
15
+ SlackSetupHelper,
16
+ prompt_for_token,
17
+ print_channel_summary,
18
+ confirm_channel_creation,
19
+ )
20
+
21
# Set up logging
# Root-logger config: timestamped "name - level - message" lines at INFO.
# NOTE(review): basicConfig at import time affects the whole process —
# confirm this is intended for a library-installed console script.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
log = logging.getLogger(__name__)
24
+
25
+
26
# Root command group: console-script entry point registers subgroups
# (e.g. `slack`) underneath this. The docstring doubles as `--help` text.
@click.group()
@click.version_option(version='0.1.0')  # NOTE(review): wheel metadata says 1.3.0 — confirm this version string.
def cli():
    """AWS Lambda utilities and Slack workspace automation."""
    pass
31
+
32
+
33
# `slack` subgroup under the root CLI; commands such as `setup-channels`
# attach here. Docstring is the user-visible `--help` text.
@cli.group()
def slack():
    """Slack workspace automation commands."""
    pass
37
+
38
+
39
@slack.command("setup-channels")
@click.option("--config", required=True, type=click.Path(exists=True), help="Path to channel configuration YAML file")
@click.option("--token", envvar="SLACK_BOT_TOKEN", help="Slack bot token (or use SLACK_BOT_TOKEN env var)")
@click.option("--check-only", is_flag=True, help="Only check existing channels, do not create")
@click.option("--dry-run", is_flag=True, help="Preview changes without creating channels")
@click.option("--output", type=click.Path(), help="Output file for environment variable configuration")
@click.option(
    "--output-format",
    type=click.Choice(["yaml", "env"], case_sensitive=False),
    default="yaml",
    help="Output format for configuration",
)
@click.option("--no-interactive", is_flag=True, help="Skip interactive confirmations")
@click.option("--validate-only", is_flag=True, help="Only validate channel names, do not create")
@click.option("--test-access", is_flag=True, help="Test bot access to created channels")
def setup_channels(config, token, check_only, dry_run, output, output_format, no_interactive, validate_only, test_access):
    """Set up Slack channels from YAML configuration.

    Automates channel creation, topic/purpose configuration, user invitations,
    and bot setup for team workspaces.

    \b
    Examples:
        # Create channels from config
        slack-channel-setup --config channels.yaml

        # Check existing channels only
        slack-channel-setup --config channels.yaml --check-only

        # Generate environment variables
        slack-channel-setup --config channels.yaml --output channels.env --output-format env

        # Use specific token
        SLACK_BOT_TOKEN=xoxb-... slack-channel-setup --config channels.yaml
    """
    # Overall flow: load config -> validate names -> authenticate bot ->
    # inspect existing channels -> (optionally) create/configure ->
    # (optionally) test access -> emit config output and print summary.
    try:
        # Load channel configuration
        log.info(f"Loading configuration from {config}")
        definitions = load_channel_config(config)

        if not definitions:
            click.echo(click.style("āŒ No channel definitions found in config", fg="red"), err=True)
            sys.exit(1)

        # Extract service/project name from first definition or config path
        # (falls back to the grandparent directory name of the config file).
        service_name = definitions[0].service or Path(config).parent.parent.name

        click.echo(f"\nšŸš€ Slack Channel Setup for {service_name}")
        click.echo("=" * 60)

        # Validate channel names locally before touching the Slack API.
        errors = validate_channel_names(definitions)
        if errors:
            click.echo(click.style("Channel name validation failed:", fg="red"), err=True)
            for error in errors:
                click.echo(f" āŒ {error}")
            sys.exit(1)

        if validate_only:
            click.echo(click.style("āœ… All channel names are valid", fg="green"))
            return

        # Get Slack token; prompt interactively only when we intend to create.
        # NOTE(review): with --check-only and no token, ChannelCreator /
        # SlackSetupHelper are still constructed with token=None and the
        # auth check below will fail — confirm whether check-only is
        # expected to work without credentials.
        if not token and not check_only:
            token = prompt_for_token()
            # Exported so downstream helpers can read the same credential.
            os.environ["SLACK_BOT_TOKEN"] = token

        # Initialize channel creator
        creator = ChannelCreator(token)
        helper = SlackSetupHelper(token)

        # Check bot authentication before doing any channel work.
        auth_info = helper.validate_bot_permissions()
        if auth_info.get("authenticated"):
            click.echo(f"\nāœ… Authenticated as: {auth_info['bot_name']} (@{auth_info['bot_id']})")
            click.echo(f" Team: {auth_info['team']}")
        else:
            click.echo(
                click.style(f"āŒ Authentication failed: {auth_info.get('error')}", fg="red"),
                err=True
            )
            sys.exit(1)

        # Check existing channels so we can report create-vs-update per name.
        channel_names = [d.name for d in definitions]
        existing_channels = creator.check_existing_channels(channel_names)

        click.echo("\nšŸ“Š Channel Status:")
        for name, channel_id in existing_channels.items():
            if channel_id:
                click.echo(click.style(f" āœ… #{name} exists (ID: {channel_id})", fg="green"))
            else:
                click.echo(click.style(f" āŒ #{name} does not exist", fg="yellow"))

        # Both --check-only and --dry-run stop here without mutating anything.
        if check_only or dry_run:
            if dry_run:
                click.echo("\nšŸ” Dry run - showing what would be created:")
                for definition in definitions:
                    exists = existing_channels.get(definition.name)
                    if exists:
                        click.echo(f" • #{definition.name} - Would update (exists)")
                    else:
                        click.echo(f" • #{definition.name} - Would create (new)")
                click.echo("\nNo changes made (dry run mode)")
            return

        # Confirm creation interactively unless --no-interactive was given.
        if not no_interactive:
            if not confirm_channel_creation(definitions, existing_channels):
                click.echo("\nChannel creation cancelled")
                return

        # Create/configure channels
        click.echo("\nšŸ”Ø Setting up channels...")
        with click.progressbar(
            definitions,
            label="Creating channels",
            item_show_func=lambda d: f"#{d.name}" if d else ""
        ) as bar:
            channel_map = {}
            for definition in bar:
                try:
                    # NOTE(review): relies on ChannelCreator's private helpers
                    # (_create_or_get_channel etc.) — presumably an intended
                    # internal API; confirm before refactoring ChannelCreator.
                    channel_id = creator._create_or_get_channel(definition)
                    if channel_id:
                        channel_map[definition.name] = channel_id
                        creator._bot_join_channel(channel_id, definition.name)
                        creator._configure_channel(channel_id, definition)
                        if definition.invite_users:
                            creator._invite_users(channel_id, definition.invite_users, definition.name)
                        creator._post_welcome_message(channel_id, definition)
                except Exception as e:
                    # Best-effort: one channel failing does not abort the rest.
                    click.echo(click.style(f"\n āš ļø Error setting up #{definition.name}: {e}", fg="yellow"))

        if not channel_map:
            click.echo(click.style("āŒ No channels were created", fg="red"), err=True)
            sys.exit(1)

        # Test access if requested
        if test_access:
            click.echo("\n🧪 Testing channel access...")
            test_results = helper.test_channel_access(list(channel_map.values()))
            for channel_id, success in test_results.items():
                # Reverse-lookup the channel name from its id.
                channel_name = [k for k, v in channel_map.items() if v == channel_id][0]
                if success:
                    click.echo(click.style(f" āœ… Can post to #{channel_name}", fg="green"))
                else:
                    click.echo(click.style(f" āŒ Cannot post to #{channel_name}", fg="red"))

        # Generate configuration (to file with --output, else echoed to stdout).
        if output or not no_interactive:
            config_output = generate_serverless_config(channel_map, service_name, output_format)

            if output:
                with open(output, "w") as f:
                    f.write(config_output)
                click.echo(click.style(f"\nāœ… Configuration written to {output}", fg="green"))
            else:
                click.echo("\n" + "=" * 60)
                click.echo("Configuration Output:")
                click.echo("=" * 60)
                click.echo(config_output)

        # Print summary
        print_channel_summary(channel_map, service_name)

    except FileNotFoundError as e:
        click.echo(click.style(f"āŒ Configuration file not found: {e}", fg="red"), err=True)
        sys.exit(1)
    except ValueError as e:
        click.echo(click.style(f"āŒ Configuration error: {e}", fg="red"), err=True)
        sys.exit(1)
    except KeyboardInterrupt:
        # 130 is the conventional exit status for SIGINT.
        click.echo("\n\nāš ļø Setup cancelled by user")
        sys.exit(130)
    except Exception as e:
        # Catch-all boundary: report, log the full traceback, exit nonzero.
        click.echo(click.style(f"āŒ Unexpected error: {e}", fg="red"), err=True)
        log.error("Unexpected error", exc_info=True)
        sys.exit(1)
217
+
218
+
219
def main():
    """Console-script entry point: delegate argv parsing to the click group."""
    # click handles option parsing, help output and process exit codes.
    cli()
222
+
223
+
224
if __name__ == "__main__":
    # Support direct execution (`python module.py`) in addition to the
    # installed console script.
    main()
@@ -0,0 +1,367 @@
1
+ """
2
+ CloudWatch metrics publishing utilities for Lambda services.
3
+ Provides efficient batching and standardized metric publishing patterns.
4
+ """
5
+
6
import functools
import logging
import os
import time
from collections import defaultdict
from datetime import datetime, timezone
from typing import Dict, List, Optional, Union

import boto3
from botocore.exceptions import ClientError
14
+
15
log = logging.getLogger(__name__)  # Per-module logger (standard convention).
16
+
17
+
18
class MetricsPublisher:
    """
    Efficient CloudWatch metrics publisher with batching support.

    Metric datums are buffered locally and sent in batches of up to 20
    (the per-request limit of ``PutMetricData``), either explicitly via
    :meth:`flush` or automatically once the buffer reaches
    ``auto_flush_size`` entries.

    Example:
        publisher = MetricsPublisher('Application')
        publisher.put_metric('RecordsProcessed', 150, unit='Count')
        publisher.put_metric('ResponseTime', 245.5, unit='Milliseconds')
        publisher.flush()  # Send all metrics
    """

    # CloudWatch PutMetricData accepts at most 20 datums per request.
    _MAX_BATCH_SIZE = 20

    def __init__(
        self,
        namespace: str,
        dimensions: Optional[Dict[str, str]] = None,
        auto_flush_size: int = 20,
        region: Optional[str] = None,
    ):
        """
        Initialize metrics publisher.

        Args:
            namespace: CloudWatch namespace for metrics
            dimensions: Default dimensions to apply to all metrics
            auto_flush_size: Automatically flush when batch reaches this size
            region: AWS region (uses default if not specified)
        """
        self.namespace = namespace
        self.default_dimensions = dimensions or {}
        self.auto_flush_size = auto_flush_size
        self.client = boto3.client("cloudwatch", region_name=region)
        self.metric_buffer: List[Dict] = []

    def _merged_dimensions(self, dimensions: Optional[Dict[str, str]]) -> List[Dict]:
        """Merge per-metric dimensions over the defaults, CloudWatch-formatted.

        Per-metric values override publisher defaults. Returns an empty list
        when no dimensions apply (callers omit the "Dimensions" key then).
        """
        combined = {**self.default_dimensions, **(dimensions or {})}
        return [{"Name": k, "Value": str(v)} for k, v in combined.items()]

    def _buffer(self, metric_data: Dict) -> None:
        """Append a datum to the buffer and auto-flush when it is full."""
        self.metric_buffer.append(metric_data)
        if len(self.metric_buffer) >= self.auto_flush_size:
            self.flush()

    def put_metric(
        self,
        metric_name: str,
        value: Union[int, float],
        unit: str = "None",
        timestamp: Optional[datetime] = None,
        dimensions: Optional[Dict[str, str]] = None,
        storage_resolution: int = 60,
    ) -> None:
        """
        Add a single-value metric to the buffer.

        Args:
            metric_name: Name of the metric
            value: Metric value
            unit: CloudWatch unit (Count, Milliseconds, Bytes, etc.)
            timestamp: Metric timestamp (defaults to now, UTC)
            dimensions: Additional dimensions for this metric
            storage_resolution: 1 for high-resolution, 60 for standard
        """
        metric_data = {
            "MetricName": metric_name,
            "Value": float(value),
            "Unit": unit,
            # Timezone-aware "now": datetime.utcnow() is deprecated (3.12+)
            # and produces naive datetimes.
            "Timestamp": timestamp or datetime.now(timezone.utc),
            "StorageResolution": storage_resolution,
        }
        merged = self._merged_dimensions(dimensions)
        if merged:
            metric_data["Dimensions"] = merged
        self._buffer(metric_data)

    def put_metric_with_statistics(
        self,
        metric_name: str,
        values: List[Union[int, float]],
        unit: str = "None",
        timestamp: Optional[datetime] = None,
        dimensions: Optional[Dict[str, str]] = None,
    ) -> None:
        """
        Add a metric with statistical values.

        Args:
            metric_name: Name of the metric
            values: List of values to calculate statistics from
            unit: CloudWatch unit
            timestamp: Metric timestamp (defaults to now, UTC)
            dimensions: Additional dimensions
        """
        if not values:
            # Nothing to aggregate; an empty statistic set is invalid.
            return

        # min()/max() are O(n); no need to sort the whole list.
        metric_data = {
            "MetricName": metric_name,
            "StatisticValues": {
                "SampleCount": len(values),
                "Sum": sum(values),
                "Minimum": min(values),
                "Maximum": max(values),
            },
            "Unit": unit,
            "Timestamp": timestamp or datetime.now(timezone.utc),
        }
        merged = self._merged_dimensions(dimensions)
        if merged:
            metric_data["Dimensions"] = merged
        self._buffer(metric_data)

    def flush(self) -> bool:
        """
        Send all buffered metrics to CloudWatch.

        Batches that CloudWatch accepts are removed from the buffer even if
        a later batch fails, so a subsequent retry never re-publishes
        duplicates; unsent datums remain buffered for retry.

        Returns:
            bool: True if all metrics were sent, False otherwise
        """
        if not self.metric_buffer:
            return True

        total = len(self.metric_buffer)
        try:
            while self.metric_buffer:
                batch = self.metric_buffer[: self._MAX_BATCH_SIZE]
                self.client.put_metric_data(Namespace=self.namespace, MetricData=batch)
                # Drop only what was actually accepted.
                del self.metric_buffer[: len(batch)]

            log.info(f"Published {total} metrics to {self.namespace}")
            return True

        except ClientError as e:
            log.error(f"Failed to publish metrics: {e}")
            return False

    def __enter__(self):
        """Context manager support."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Ensure metrics are flushed on exit."""
        self.flush()
172
+
173
+
174
class MetricAggregator:
    """
    Aggregate metrics over time before publishing.

    Collects raw samples and running counters locally, then hands them to a
    MetricsPublisher in one shot — useful for high-frequency metrics that
    would otherwise generate one API datum per observation.
    """

    def __init__(self, publisher: MetricsPublisher):
        self.publisher = publisher
        self.aggregates: Dict[str, List[float]] = defaultdict(list)
        self.counters: Dict[str, float] = defaultdict(float)

    def add_value(self, metric_name: str, value: Union[int, float]) -> None:
        """Record a sample for later statistical publication."""
        self.aggregates[metric_name].append(float(value))

    def increment_counter(self, metric_name: str, value: Union[int, float] = 1) -> None:
        """Bump a running counter metric by *value* (default 1)."""
        self.counters[metric_name] += value

    def publish_aggregates(self, unit: str = "None", dimensions: Optional[Dict[str, str]] = None) -> None:
        """Publish everything collected so far, then reset the window."""
        # Sampled values go out as statistic sets...
        for name, samples in self.aggregates.items():
            if not samples:
                continue
            self.publisher.put_metric_with_statistics(name, samples, unit=unit, dimensions=dimensions)

        # ...counters as plain Count datums.
        for name, total in self.counters.items():
            self.publisher.put_metric(name, total, unit="Count", dimensions=dimensions)

        # Start a fresh aggregation window.
        self.aggregates.clear()
        self.counters.clear()
207
+
208
+
209
+ # Standard metric names for consistency across services
210
class StandardMetrics:
    """Standard metric names used across application services.

    Use these constants instead of ad-hoc strings so dashboards and alarms
    can rely on a single spelling per metric.
    """

    # Service health metrics
    SERVICE_HEALTH = "ServiceHealth"
    ERROR_RATE = "ErrorRate"
    RESPONSE_TIME = "ResponseTime"
    REQUEST_COUNT = "RequestCount"

    # Business metrics
    RECORDS_CREATED = "RecordsCreated"
    RECORDS_PROCESSED = "RecordsProcessed"
    USERS_ACTIVE = "ActiveUsers"
    REVENUE_PROCESSED = "RevenueProcessed"

    # Lambda metrics (published by track_lambda_performance below)
    LAMBDA_DURATION = "LambdaDuration"
    LAMBDA_ERRORS = "LambdaErrors"
    LAMBDA_THROTTLES = "LambdaThrottles"
    LAMBDA_COLD_STARTS = "LambdaColdStarts"

    # Database metrics
    DB_QUERY_TIME = "DatabaseQueryTime"
    DB_CONNECTION_ERRORS = "DatabaseConnectionErrors"
    DB_ACTIVE_CONNECTIONS = "DatabaseActiveConnections"

    # Elasticsearch metrics
    ES_QUERY_TIME = "ElasticsearchQueryTime"
    ES_QUERY_ERRORS = "ElasticsearchQueryErrors"
    ES_DOCUMENT_COUNT = "ElasticsearchDocumentCount"

    # External API metrics
    API_CALL_DURATION = "ExternalAPICallDuration"
    API_CALL_ERRORS = "ExternalAPICallErrors"
    API_RATE_LIMIT_HITS = "APIRateLimitHits"
245
+
246
+
247
def track_lambda_performance(namespace: str = "Application"):
    """
    Decorator to automatically track Lambda function performance.

    Publishes cold-start, duration, request-count and error metrics to
    CloudWatch under *namespace*, dimensioned by function name and the
    STAGE environment variable.

    Args:
        namespace: CloudWatch namespace to publish metrics under.

    Example:
        @track_lambda_performance()
        def handler(event, context):
            # Your Lambda logic
            return response
    """

    def decorator(func):
        # Warm/cold state must live in the closure (module lifetime), not on
        # the context object: Lambda constructs a fresh context for every
        # invocation, so an attribute set on it never survives to the next
        # call and every invocation would be counted as a cold start.
        state = {"warm": False}

        @functools.wraps(func)  # preserve the handler's name/docstring
        def wrapper(event, context):
            start_time = time.time()
            cold_start = not state["warm"]
            state["warm"] = True

            publisher = MetricsPublisher(
                namespace,
                dimensions={"FunctionName": context.function_name, "Environment": os.environ.get("STAGE", "dev")},
            )

            try:
                # Kept for backward compatibility with callers that inspect
                # this attribute; no longer used for cold-start detection.
                context.is_warm = True

                # Track cold start
                if cold_start:
                    publisher.put_metric(StandardMetrics.LAMBDA_COLD_STARTS, 1, unit="Count")

                # Execute function
                result = func(event, context)

                # Track success metrics
                duration = (time.time() - start_time) * 1000  # Convert to milliseconds
                publisher.put_metric(StandardMetrics.LAMBDA_DURATION, duration, unit="Milliseconds")
                publisher.put_metric(StandardMetrics.REQUEST_COUNT, 1, unit="Count")

                return result

            except Exception:
                # Track error, then re-raise so Lambda still sees the failure.
                publisher.put_metric(StandardMetrics.LAMBDA_ERRORS, 1, unit="Count")
                raise

            finally:
                # Ensure metrics are published even on error paths.
                publisher.flush()

        return wrapper

    return decorator
298
+
299
+
300
def create_service_dimensions(service_name: str, environment: Optional[str] = None) -> Dict[str, str]:
    """
    Create standard dimensions for service metrics.

    Args:
        service_name: Name of the service
        environment: Environment (defaults to STAGE env var, then "dev")

    Returns:
        Dict of dimensions (ServiceName, Environment, and Region when known)
    """
    resolved_env = environment or os.environ.get("STAGE", "dev")
    dims: Dict[str, str] = {"ServiceName": service_name, "Environment": resolved_env}

    # Attach the region dimension only when the runtime exposes one.
    aws_region = os.environ.get("AWS_REGION", os.environ.get("AWS_DEFAULT_REGION"))
    if aws_region:
        dims["Region"] = aws_region

    return dims
319
+
320
+
321
def publish_health_metric(service_name: str, is_healthy: bool, namespace: str = "Application") -> None:
    """
    Publish a simple health metric for a service.

    Emits ServiceHealth=1 when healthy, 0 otherwise, and flushes
    immediately so the datum is not left sitting in a buffer.

    Args:
        service_name: Name of the service
        is_healthy: Whether the service is healthy
        namespace: CloudWatch namespace
    """
    health_publisher = MetricsPublisher(namespace, dimensions=create_service_dimensions(service_name))
    health_value = 1 if is_healthy else 0
    health_publisher.put_metric(StandardMetrics.SERVICE_HEALTH, health_value, unit="None")
    health_publisher.flush()
335
+
336
+
337
class TimedMetric:
    """
    Context manager for timing operations and publishing metrics.

    On exit, the elapsed wall-clock time (in milliseconds) is handed to the
    publisher under *metric_name*. The datum is buffered, not flushed; the
    metric is recorded even when the body raises.

    Example:
        publisher = MetricsPublisher('MyApp')
        with TimedMetric(publisher, 'DatabaseQuery', unit='Milliseconds'):
            # Perform database query
            results = db.query("SELECT * FROM records")
    """

    def __init__(
        self,
        publisher: MetricsPublisher,
        metric_name: str,
        unit: str = "Milliseconds",
        dimensions: Optional[Dict[str, str]] = None,
    ):
        self.publisher = publisher
        self.metric_name = metric_name
        self.unit = unit
        self.dimensions = dimensions
        self.start_time = None

    def __enter__(self):
        # Stamp the start of the timed section.
        self.start_time = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed_ms = (time.time() - self.start_time) * 1000
        self.publisher.put_metric(self.metric_name, elapsed_ms, unit=self.unit, dimensions=self.dimensions)