workbench 0.8.167__py3-none-any.whl → 0.8.168__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- workbench/scripts/ml_pipeline_launcher.py +44 -14
- workbench/scripts/monitor_cloud_watch.py +20 -100
- workbench/utils/cloudwatch_utils.py +137 -0
- {workbench-0.8.167.dist-info → workbench-0.8.168.dist-info}/METADATA +1 -1
- {workbench-0.8.167.dist-info → workbench-0.8.168.dist-info}/RECORD +9 -8
- {workbench-0.8.167.dist-info → workbench-0.8.168.dist-info}/WHEEL +0 -0
- {workbench-0.8.167.dist-info → workbench-0.8.168.dist-info}/entry_points.txt +0 -0
- {workbench-0.8.167.dist-info → workbench-0.8.168.dist-info}/licenses/LICENSE +0 -0
- {workbench-0.8.167.dist-info → workbench-0.8.168.dist-info}/top_level.txt +0 -0
workbench/scripts/ml_pipeline_launcher.py

@@ -8,6 +8,7 @@ from pathlib import Path
 from workbench.core.cloud_platform.aws.aws_account_clamp import AWSAccountClamp
 from workbench.utils.config_manager import ConfigManager
 from workbench.utils.s3_utils import upload_content_to_s3
+from workbench.utils.cloudwatch_utils import get_cloudwatch_logs_url

 log = logging.getLogger("workbench")
 cm = ConfigManager()

@@ -27,10 +28,9 @@ def get_batch_role_arn() -> str:


 def ensure_job_definition():
-    """
+    """Register or update the Batch job definition for ML pipeline runner."""
     batch = AWSAccountClamp().boto3_session.client("batch")
     name = "workbench-ml-pipeline-runner"
-
     response = batch.register_job_definition(
         jobDefinitionName=name,
         type="container",

@@ -40,27 +40,42 @@ def ensure_job_definition():
             "resourceRequirements": [{"type": "VCPU", "value": "2"}, {"type": "MEMORY", "value": "4096"}],
             "jobRoleArn": get_batch_role_arn(),
             "executionRoleArn": get_batch_role_arn(),
-            "environment": [
-
+            "environment": [
+                {"name": "WORKBENCH_BUCKET", "value": workbench_bucket},
+                {"name": "PYTHONUNBUFFERED", "value": "1"},
+            ],
+            # "networkConfiguration": {"assignPublicIp": "ENABLED"},  # Required for ECR Image Pull (when not in VPC)
         },
         timeout={"attemptDurationSeconds": 10800},  # 3 hours
     )
-
     log.info(f"Job definition ready: {name} (revision {response['revision']})")
     return name


 def run_batch_job(script_path: str) -> int:
-    """
+    """
+    Submit and monitor an AWS Batch job for ML pipeline execution.
+    This function:
+    1. Uploads the ML pipeline script to S3
+    2. Submits a Batch job to run the script in a container
+    3. Monitors job status until completion
+    4. Returns the job's exit code
+
+    Args:
+        script_path: Local path to the ML pipeline script
+
+    Returns:
+        Exit code from the batch job (0 for success, non-zero for failure)
+    """
     batch = AWSAccountClamp().boto3_session.client("batch")
     script_name = Path(script_path).stem

-    # Upload script to S3
+    # Upload script to S3 for the container to download
     s3_path = f"s3://{workbench_bucket}/batch-jobs/{Path(script_path).name}"
     log.info(f"Uploading script to {s3_path}")
     upload_content_to_s3(Path(script_path).read_text(), s3_path)

-    # Submit job
+    # Submit the Batch job
     job_name = f"workbench_{script_name}_{datetime.now():%Y%m%d_%H%M%S}"
     response = batch.submit_job(
         jobName=job_name,

@@ -68,35 +83,50 @@ def run_batch_job(script_path: str) -> int:
         jobDefinition=ensure_job_definition(),
         containerOverrides={
             "environment": [
-                {"name": "
+                {"name": "ML_PIPELINE_S3_PATH", "value": s3_path},
                 {"name": "WORKBENCH_BUCKET", "value": workbench_bucket},
             ]
         },
     )
-
     job_id = response["jobId"]
     log.info(f"Submitted job: {job_name} ({job_id})")

-    #
+    # Monitor job execution
+    last_status = None
     while True:
+        # Check job status
         job = batch.describe_jobs(jobs=[job_id])["jobs"][0]
         status = job["status"]
-
+        if status != last_status:
+            log.info(f"Job status: {status}")
+            last_status = status

+        # Check if job completed
        if status in ["SUCCEEDED", "FAILED"]:
             exit_code = job.get("attempts", [{}])[-1].get("exitCode", 1)
             if status == "FAILED":
                 log.error(f"Job failed: {job.get('statusReason', 'Unknown reason')}")
+            else:
+                log.info("Job completed successfully")
+
+            # Get CloudWatch logs URL
+            log_stream_name = job.get("container", {}).get("logStreamName")
+            logs_url = get_cloudwatch_logs_url(log_group="/aws/batch/job", log_stream=log_stream_name)
+            if logs_url:
+                # OSC 8 hyperlink format for modern terminals
+                clickable_url = f"\033]8;;{logs_url}\033\\{logs_url}\033]8;;\033\\"
+                log.info(f"View logs: {clickable_url}")
             return exit_code

-
+        # Sleep a bit before next status check
+        time.sleep(10)


 def main():
+    """CLI entry point for running ML pipelines on AWS Batch."""
     parser = argparse.ArgumentParser(description="Run ML pipeline script on AWS Batch")
     parser.add_argument("script_file", help="Local path to ML pipeline script")
     args = parser.parse_args()
-
     try:
         exit_code = run_batch_job(args.script_file)
         exit(exit_code)
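Side note on the clickable log link added above: the launcher wraps the CloudWatch console URL in OSC 8 escape sequences, which capable terminal emulators render as a clickable hyperlink while older terminals simply show the raw URL text. A minimal sketch of the technique (the helper name and example URL here are illustrative, not part of the package):

def osc8_link(url: str, label: str = "") -> str:
    """Wrap a URL in OSC 8 escape sequences so capable terminals render it as a hyperlink."""
    label = label or url
    # ESC ] 8 ; ; URI ST ...link text... ESC ] 8 ; ; ST
    return f"\033]8;;{url}\033\\{label}\033]8;;\033\\"

# Prints a clickable link in terminals that support OSC 8 (placeholder URL)
print("View logs:", osc8_link("https://console.aws.amazon.com/cloudwatch/home"))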
workbench/scripts/monitor_cloud_watch.py

@@ -4,8 +4,10 @@ import sys
 import time
 import argparse
 from datetime import datetime, timedelta, timezone
-
+
+# Workbench Imports
 from workbench.utils.repl_utils import cprint, Spinner
+from workbench.utils.cloudwatch_utils import get_cloudwatch_client, get_active_log_streams, stream_log_events

 # Define the log levels to include all log levels above the specified level
 log_level_map = {

@@ -33,64 +35,6 @@ def date_display(dt):
     return dt.strftime("%Y-%m-%d %I:%M%p") + "(UTC)"


-def get_cloudwatch_client():
-    """Get the CloudWatch Logs client using the Workbench assumed role session."""
-    session = AWSAccountClamp().boto3_session
-    return session.client("logs")
-
-
-def get_active_log_streams(client, log_group_name, start_time_ms, stream_filter=None):
-    """Retrieve log streams that have events after the specified start time."""
-
-    # Get all the streams in the log group
-    active_streams = []
-    stream_params = {
-        "logGroupName": log_group_name,
-        "orderBy": "LastEventTime",
-        "descending": True,
-    }
-
-    # Loop to retrieve all log streams (maximum 50 per call)
-    while True:
-        response = client.describe_log_streams(**stream_params)
-        log_streams = response.get("logStreams", [])
-
-        for log_stream in log_streams:
-            log_stream_name = log_stream["logStreamName"]
-            last_event_timestamp = log_stream.get("lastEventTimestamp")
-
-            # Include streams with events since the specified start time
-            # Note: There's some issue where the last event timestamp is 'off'
-            # so we're going to add 60 minutes from the last event timestamp
-            last_event_timestamp += 60 * 60 * 1000
-            if last_event_timestamp >= start_time_ms:
-                active_streams.append(log_stream_name)
-            else:
-                break  # Stop if we reach streams older than the start time
-
-        # Check if there are more streams to retrieve
-        if "nextToken" in response:
-            stream_params["nextToken"] = response["nextToken"]
-        else:
-            break
-
-    # Sort and report the active log streams
-    active_streams.sort()
-    if active_streams:
-        print("Active log streams:", len(active_streams))
-
-    # Filter the active streams by a substring if provided
-    if stream_filter and active_streams:
-        print(f"Filtering active log streams by '{stream_filter}'...")
-        active_streams = [stream for stream in active_streams if stream_filter in stream]
-
-    for stream in active_streams:
-        print(f"\t - {stream}")
-
-    # Return the active log streams
-    return active_streams
-
-
 def get_latest_log_events(client, log_group_name, start_time, end_time=None, stream_filter=None):
     """Retrieve the latest log events from the active/filtered log streams in a CloudWatch Logs group."""


@@ -99,11 +43,15 @@ def get_latest_log_events(client, log_group_name, start_time, end_time=None, stream_filter=None):
     get_latest_log_events.first_run = True

     log_events = []
-    start_time_ms = int(start_time.timestamp() * 1000)
+    start_time_ms = int(start_time.timestamp() * 1000)
+
+    # Use the util function to get active streams
+    active_streams = get_active_log_streams(log_group_name, start_time_ms, stream_filter, client)

-    # Get the active log streams with events since start_time
-    active_streams = get_active_log_streams(client, log_group_name, start_time_ms, stream_filter)
     if active_streams:
+        print(f"Active log streams: {len(active_streams)}")
+        for stream in active_streams:
+            print(f"\t - {stream}")
         print(f"Processing log events from {date_display(start_time)} on {len(active_streams)} active log streams...")
         get_latest_log_events.first_run = False
     else:

@@ -114,50 +62,22 @@ def get_latest_log_events(client, log_group_name, start_time, end_time=None, stream_filter=None):
         print("Monitoring for new events...")
         return log_events

-    #
+    # Use the util function to stream events from each log stream
     for log_stream_name in active_streams:
-        params = {
-            "logGroupName": log_group_name,
-            "logStreamName": log_stream_name,
-            "startTime": start_time_ms,  # Use start_time in milliseconds
-            "startFromHead": True,  # Start from the nearest event to start_time
-        }
-        next_event_token = None
-        if end_time is not None:
-            params["endTime"] = int(end_time.timestamp() * 1000)
-
-        # Process the log events from this log stream
         spinner = Spinner("lightpurple", f"Pulling events from {log_stream_name}:")
         spinner.start()
         log_stream_events = 0

-        #
-
-
-
-
+        # Stream events using the util function
+        for event in stream_log_events(
+            log_group_name, log_stream_name, start_time, end_time, follow=False, client=client
+        ):
+            log_stream_events += 1
+            log_events.append(event)

-
-
+        spinner.stop()
+        print(f"Processed {log_stream_events} events from {log_stream_name} (Total: {len(log_events)})")

-        events = events_response.get("events", [])
-        for event in events:
-            event["logStreamName"] = log_stream_name
-
-        # Add the log stream events to our list of all log events
-        log_stream_events += len(events)
-        log_events.extend(events)
-
-        # Handle pagination for log events
-        next_event_token = events_response.get("nextForwardToken")
-
-        # Break the loop if there are no more events to fetch
-        if not next_event_token or next_event_token == params.get("nextToken"):
-            spinner.stop()
-            print(f"Processed {log_stream_events} events from {log_stream_name} (Total: {len(log_events)})")
-            break
-
-        # Return the log events
     return log_events


@@ -206,6 +126,7 @@ def monitor_log_group(
     print(f"Monitoring log group: {log_group_name} from {date_display(start_time)}")
     print(f"Log levels: {log_levels}")
     print(f"Search terms: {search_terms}")
+
     while True:
         # Get the latest log events with stream filtering if provided
         all_log_events = get_latest_log_events(client, log_group_name, start_time, end_time, stream_filter)

@@ -218,7 +139,6 @@ def monitor_log_group(

             # Check the search terms
             if not search_terms or any(term in event["message"].lower() for term in search_terms):
-
                 # Calculate the start and end index for this match
                 start_index = max(i - before, 0)
                 end_index = min(i + after, len(all_log_events) - 1)
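The refactor above replaces hand-rolled get_log_events pagination with the shared stream_log_events helper, but both rely on the same CloudWatch Logs behavior: get_log_events returns a nextForwardToken on every call, and once the stream is exhausted that token stops changing, so token equality signals the end. A minimal standalone sketch of that loop using plain boto3 (the function name is illustrative; Workbench itself goes through its assumed-role session):

import boto3

def read_stream(log_group: str, log_stream: str):
    """Yield every event in a log stream, detecting the end via token equality."""
    client = boto3.client("logs")
    params = {"logGroupName": log_group, "logStreamName": log_stream, "startFromHead": True}
    token = None
    while True:
        response = client.get_log_events(**params)
        yield from response.get("events", [])
        new_token = response.get("nextForwardToken")
        # CloudWatch returns the same forward token once the stream is
        # exhausted, so an unchanged token means there are no more events
        if new_token == token:
            break
        token = new_token
        params["nextToken"] = token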
workbench/utils/cloudwatch_utils.py (new file)

@@ -0,0 +1,137 @@
+"""AWS CloudWatch utility functions for Workbench."""
+
+import time
+import logging
+from datetime import datetime, timezone
+from typing import List, Optional, Dict, Generator
+from urllib.parse import quote
+from workbench.core.cloud_platform.aws.aws_account_clamp import AWSAccountClamp
+
+log = logging.getLogger("workbench")
+
+
+def get_cloudwatch_client():
+    """Get the CloudWatch Logs client using the Workbench assumed role session."""
+    session = AWSAccountClamp().boto3_session
+    return session.client("logs")
+
+
+def get_cloudwatch_logs_url(log_group: str, log_stream: str) -> Optional[str]:
+    """
+    Generate CloudWatch logs URL for the specified log group and stream.
+
+    Args:
+        log_group: Log group name (e.g., '/aws/batch/job')
+        log_stream: Log stream name
+
+    Returns:
+        CloudWatch console URL or None if unable to generate
+    """
+    try:
+        region = AWSAccountClamp().region
+
+        # URL encode the log group and stream
+        encoded_group = quote(log_group, safe="")
+        encoded_stream = quote(log_stream, safe="")
+
+        return (
+            f"https://{region}.console.aws.amazon.com/cloudwatch/home?"
+            f"region={region}#logsV2:log-groups/log-group/{encoded_group}"
+            f"/log-events/{encoded_stream}"
+        )
+    except Exception as e:  # noqa: BLE001
+        log.warning(f"Failed to generate CloudWatch logs URL: {e}")
+        return None
+
+
+def get_active_log_streams(
+    log_group_name: str, start_time_ms: int, stream_filter: Optional[str] = None, client=None
+) -> List[str]:
+    """Retrieve log streams that have events after the specified start time."""
+    if not client:
+        client = get_cloudwatch_client()
+    active_streams = []
+    stream_params = {
+        "logGroupName": log_group_name,
+        "orderBy": "LastEventTime",
+        "descending": True,
+    }
+    while True:
+        response = client.describe_log_streams(**stream_params)
+        log_streams = response.get("logStreams", [])
+        for log_stream in log_streams:
+            log_stream_name = log_stream["logStreamName"]
+            last_event_timestamp = log_stream.get("lastEventTimestamp", 0)
+            if last_event_timestamp >= start_time_ms:
+                active_streams.append(log_stream_name)
+            else:
+                break
+        if "nextToken" in response:
+            stream_params["nextToken"] = response["nextToken"]
+        else:
+            break
+    # Sort and filter streams
+    active_streams.sort()
+    if stream_filter and active_streams:
+        active_streams = [stream for stream in active_streams if stream_filter in stream]
+    return active_streams
+
+
+def stream_log_events(
+    log_group_name: str,
+    log_stream_name: str,
+    start_time: Optional[datetime] = None,
+    end_time: Optional[datetime] = None,
+    follow: bool = False,
+    client=None,
+) -> Generator[Dict, None, None]:
+    """
+    Stream log events from a specific log stream.
+    Yields:
+        Log events as dictionaries
+    """
+    if not client:
+        client = get_cloudwatch_client()
+    params = {"logGroupName": log_group_name, "logStreamName": log_stream_name, "startFromHead": True}
+    if start_time:
+        params["startTime"] = int(start_time.timestamp() * 1000)
+    if end_time:
+        params["endTime"] = int(end_time.timestamp() * 1000)
+    next_token = None
+    while True:
+        if next_token:
+            params["nextToken"] = next_token
+            params.pop("startTime", None)
+        try:
+            response = client.get_log_events(**params)
+            events = response.get("events", [])
+            for event in events:
+                event["logStreamName"] = log_stream_name
+                yield event
+            next_token = response.get("nextForwardToken")
+            # Break if no more events or same token
+            if not next_token or next_token == params.get("nextToken"):
+                if not follow:
+                    break
+                time.sleep(2)
+        except client.exceptions.ResourceNotFoundException:
+            if not follow:
+                break
+            time.sleep(2)
+
+
+def print_log_event(
+    event: dict, show_stream: bool = True, local_time: bool = True, custom_format: Optional[str] = None
+):
+    """Print a formatted log event."""
+    timestamp = datetime.fromtimestamp(event["timestamp"] / 1000, tz=timezone.utc)
+    if local_time:
+        timestamp = timestamp.astimezone()
+    message = event["message"].rstrip()
+    if custom_format:
+        # Allow custom formatting
+        print(custom_format.format(stream=event.get("logStreamName", ""), time=timestamp, message=message))
+    elif show_stream and "logStreamName" in event:
+        print(f"[{event['logStreamName']}] [{timestamp:%Y-%m-%d %I:%M%p}] {message}")
+    else:
+        print(f"[{timestamp:%H:%M:%S}] {message}")
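Taken together, the new module gives both the launcher and the monitor a single home for CloudWatch access. A hedged usage sketch based only on the signatures visible in this diff (the log group name and time window are placeholders, and the helpers' runtime behavior may differ from what the signatures suggest):

from datetime import datetime, timedelta, timezone

# Assumed importable per the new module above
from workbench.utils.cloudwatch_utils import (
    get_active_log_streams,
    stream_log_events,
    print_log_event,
    get_cloudwatch_logs_url,
)

start = datetime.now(timezone.utc) - timedelta(hours=1)
start_ms = int(start.timestamp() * 1000)

# Find streams with recent events, then pretty-print their events
for stream in get_active_log_streams("/aws/batch/job", start_ms):
    for event in stream_log_events("/aws/batch/job", stream, start_time=start):
        print_log_event(event)
    # Console deep-link for the same stream
    print(get_cloudwatch_logs_url("/aws/batch/job", stream))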
{workbench-0.8.167.dist-info → workbench-0.8.168.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: workbench
-Version: 0.8.167
+Version: 0.8.168
 Summary: Workbench: A Dashboard and Python API for creating and deploying AWS SageMaker Model Pipelines
 Author-email: SuperCowPowers LLC <support@supercowpowers.com>
 License-Expression: MIT
{workbench-0.8.167.dist-info → workbench-0.8.168.dist-info}/RECORD

@@ -167,8 +167,8 @@ workbench/resources/open_source_api.key,sha256=3S0OTblsmC0msUPdE_dbBmI83xJNmYscu
 workbench/resources/signature_verify_pub.pem,sha256=V3-u-3_z2PH-805ybkKvzDOBwAbvHxcKn0jLBImEtzM,272
 workbench/scripts/check_double_bond_stereo.py,sha256=p5hnL54Weq77ES0HCELq9JeoM-PyUGkvVSeWYF2dKyo,7776
 workbench/scripts/glue_launcher.py,sha256=bIKQvfGxpAhzbeNvTnHfRW_5kQhY-169_868ZnCejJk,10692
-workbench/scripts/ml_pipeline_launcher.py,sha256=
-workbench/scripts/monitor_cloud_watch.py,sha256=
+workbench/scripts/ml_pipeline_launcher.py,sha256=RUKUBERL7RE-uNs_ttkPUa6Rf-QJERYWIhp_XLOtF78,5083
+workbench/scripts/monitor_cloud_watch.py,sha256=s7MY4bsHts0nup9G0lWESCvgJZ9Mw1Eo-c8aKRgLjMw,9235
 workbench/scripts/redis_expire.py,sha256=DxI_RKSNlrW2BsJZXcsSbaWGBgPZdPhtzHjV9SUtElE,1120
 workbench/scripts/redis_report.py,sha256=iaJSuGPyLCs6e0TMcZDoT0YyJ43xJ1u74YD8FLnnUg4,990
 workbench/scripts/show_config.py,sha256=ff2wIKIlOktoitcrhk2r2B4I4N_ynXkEHB11l5nn0nA,548

@@ -199,6 +199,7 @@ workbench/utils/bulk_utils.py,sha256=s1lYN2Uk536MNGetekLYL_VL0N34hUjk1FX9BAz3Qu0
 workbench/utils/cache.py,sha256=0R5RXYEz_XHARK3anmQC4VRMawMks_cJ8S4vwC2roAE,5524
 workbench/utils/chem_utils.py,sha256=tLTAvLKTOiYSzbVQF0M8V5-ej36IVgr21CNB2vVJjYQ,56780
 workbench/utils/cloudwatch_handler.py,sha256=t0L280Qa1nMq95dwnf8lB5g8FHrQAyGY5S4JwP3yIa8,5165
+workbench/utils/cloudwatch_utils.py,sha256=wXSqKcJlSnHyC0D6d4RsH8wwmx_0CsffcetUgXlZ_78,4828
 workbench/utils/color_utils.py,sha256=TmDGLK44t975lkfjt_1O-ee02QxrKfke7vPuXb-V-Uo,11779
 workbench/utils/config_manager.py,sha256=SBBmO1RGCQ_Zyh91tDxL1HOm5B0v38ImlLnOsEKzXPU,17649
 workbench/utils/dashboard_metrics.py,sha256=cNFI0GIAjd_IiDzM1oebsJ2QkRZuW068W_66ZC3J100,7398

@@ -275,9 +276,9 @@ workbench/web_interface/page_views/main_page.py,sha256=X4-KyGTKLAdxR-Zk2niuLJB2Y
 workbench/web_interface/page_views/models_page_view.py,sha256=M0bdC7bAzLyIaE2jviY12FF4abdMFZmg6sFuOY_LaGI,2650
 workbench/web_interface/page_views/page_view.py,sha256=Gh6YnpOGlUejx-bHZAf5pzqoQ1H1R0OSwOpGhOBO06w,455
 workbench/web_interface/page_views/pipelines_page_view.py,sha256=v2pxrIbsHBcYiblfius3JK766NZ7ciD2yPx0t3E5IJo,2656
-workbench-0.8.
-workbench-0.8.
-workbench-0.8.
-workbench-0.8.
-workbench-0.8.
-workbench-0.8.
+workbench-0.8.168.dist-info/licenses/LICENSE,sha256=z4QMMPlLJkZjU8VOKqJkZiQZCEZ--saIU2Z8-p3aVc0,1080
+workbench-0.8.168.dist-info/METADATA,sha256=PTFR16ft5NCrG-_umsJKrIJSa3eLnpju1EkXZmafxxM,9210
+workbench-0.8.168.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+workbench-0.8.168.dist-info/entry_points.txt,sha256=V_v6hQ4DYoCJnTnqbm036reCri_CXkA_ONcRSuF5OKg,305
+workbench-0.8.168.dist-info/top_level.txt,sha256=Dhy72zTxaA_o_yRkPZx5zw-fwumnjGaeGf0hBN3jc_w,10
+workbench-0.8.168.dist-info/RECORD,,
Files without changes: {workbench-0.8.167.dist-info → workbench-0.8.168.dist-info}/WHEEL, entry_points.txt, licenses/LICENSE, top_level.txt