workbench 0.8.167__py3-none-any.whl → 0.8.169__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of workbench might be problematic.

@@ -8,6 +8,7 @@ from pathlib import Path
  from workbench.core.cloud_platform.aws.aws_account_clamp import AWSAccountClamp
  from workbench.utils.config_manager import ConfigManager
  from workbench.utils.s3_utils import upload_content_to_s3
+ from workbench.utils.cloudwatch_utils import get_cloudwatch_logs_url

  log = logging.getLogger("workbench")
  cm = ConfigManager()
@@ -26,32 +27,42 @@ def get_batch_role_arn() -> str:
      return f"arn:aws:iam::{account_id}:role/Workbench-BatchRole"


- def ensure_job_definition():
-     """Ensure the job definition exists with network configuration."""
-     batch = AWSAccountClamp().boto3_session.client("batch")
-     name = "workbench-ml-pipeline-runner"
-
-     response = batch.register_job_definition(
-         jobDefinitionName=name,
-         type="container",
-         platformCapabilities=["FARGATE"],
-         containerProperties={
-             "image": get_ecr_image_uri(),
-             "resourceRequirements": [{"type": "VCPU", "value": "2"}, {"type": "MEMORY", "value": "4096"}],
-             "jobRoleArn": get_batch_role_arn(),
-             "executionRoleArn": get_batch_role_arn(),
-             "environment": [{"name": "WORKBENCH_BUCKET", "value": workbench_bucket}],
-             "networkConfiguration": {"assignPublicIp": "ENABLED"},  # This is required so the ECR image can be pulled
-         },
-         timeout={"attemptDurationSeconds": 10800},  # 3 hours
-     )
+ def _log_cloudwatch_link(job: dict, message_prefix: str = "View logs") -> None:
+     """
+     Helper method to log CloudWatch logs link with clickable URL and full URL display.
+
+     Args:
+         job: Batch job description dictionary
+         message_prefix: Prefix for the log message (default: "View logs")
+     """
+     log_stream = job.get("container", {}).get("logStreamName")
+     logs_url = get_cloudwatch_logs_url(log_group="/aws/batch/job", log_stream=log_stream)
+     if logs_url:
+         clickable_url = f"\033]8;;{logs_url}\033\\{logs_url}\033]8;;\033\\"
+         log.info(f"{message_prefix}: {clickable_url}")
+     else:
+         log.info("Check AWS Batch console for logs")
+
+
+ def run_batch_job(script_path: str, size: str = "small") -> int:
+     """
+     Submit and monitor an AWS Batch job for ML pipeline execution.

-     log.info(f"Job definition ready: {name} (revision {response['revision']})")
-     return name
+     Uploads script to S3, submits Batch job, monitors until completion or 2 minutes of RUNNING.

+     Args:
+         script_path: Local path to the ML pipeline script
+         size: Job size tier - "small" (default), "medium", or "large"
+             - small: 2 vCPU, 4GB RAM for lightweight processing
+             - medium: 4 vCPU, 8GB RAM for standard ML workloads
+             - large: 8 vCPU, 16GB RAM for heavy training/inference
+
+     Returns:
+         Exit code (0 for success/disconnected, non-zero for failure)
+     """
+     if size not in ["small", "medium", "large"]:
+         raise ValueError(f"Invalid size '{size}'. Must be 'small', 'medium', or 'large'")

- def run_batch_job(script_path: str) -> int:
-     """Upload script, submit job, and track to completion."""
      batch = AWSAccountClamp().boto3_session.client("batch")
      script_name = Path(script_path).stem

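Note on the escape sequence used in _log_cloudwatch_link above: it is the OSC 8 terminal hyperlink escape, which renders the URL as a clickable link in terminals that support it and falls back to plain text elsewhere. A minimal standalone sketch (the URL below is a placeholder, not one produced by workbench):

    # OSC 8 hyperlink sketch; "https://example.com/cloudwatch" is a placeholder URL.
    # Supporting terminals (iTerm2, Windows Terminal, recent GNOME Terminal) show a clickable link.
    url = "https://example.com/cloudwatch"
    clickable = f"\033]8;;{url}\033\\{url}\033]8;;\033\\"
    print(clickable)
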
@@ -65,38 +76,55 @@ def run_batch_job(script_path: str) -> int:
      response = batch.submit_job(
          jobName=job_name,
          jobQueue="workbench-job-queue",
-         jobDefinition=ensure_job_definition(),
+         jobDefinition=f"workbench-ml-pipeline-{size}",
          containerOverrides={
              "environment": [
-                 {"name": "SCRIPT_S3_PATH", "value": s3_path},
+                 {"name": "ML_PIPELINE_S3_PATH", "value": s3_path},
                  {"name": "WORKBENCH_BUCKET", "value": workbench_bucket},
              ]
          },
      )
-
      job_id = response["jobId"]
-     log.info(f"Submitted job: {job_name} ({job_id})")
+     log.info(f"Submitted job: {job_name} ({job_id}) using {size} tier")

-     # Track job to completion
+     # Monitor job
+     last_status, running_start = None, None
      while True:
          job = batch.describe_jobs(jobs=[job_id])["jobs"][0]
          status = job["status"]
-         log.info(f"Job status: {status}")

+         if status != last_status:
+             log.info(f"Job status: {status}")
+             last_status = status
+             if status == "RUNNING":
+                 running_start = time.time()
+
+         # Disconnect after 2 minutes of running
+         if status == "RUNNING" and running_start and (time.time() - running_start >= 120):
+             log.info("✅ ML Pipeline is running successfully!")
+             _log_cloudwatch_link(job, "📊 Monitor logs")
+             return 0
+
+         # Handle completion
          if status in ["SUCCEEDED", "FAILED"]:
              exit_code = job.get("attempts", [{}])[-1].get("exitCode", 1)
-             if status == "FAILED":
-                 log.error(f"Job failed: {job.get('statusReason', 'Unknown reason')}")
+             msg = (
+                 "Job completed successfully"
+                 if status == "SUCCEEDED"
+                 else f"Job failed: {job.get('statusReason', 'Unknown')}"
+             )
+             log.info(msg) if status == "SUCCEEDED" else log.error(msg)
+             _log_cloudwatch_link(job)
              return exit_code

-         time.sleep(30)
+         time.sleep(10)


  def main():
+     """CLI entry point for running ML pipelines on AWS Batch."""
      parser = argparse.ArgumentParser(description="Run ML pipeline script on AWS Batch")
      parser.add_argument("script_file", help="Local path to ML pipeline script")
      args = parser.parse_args()
-
      try:
          exit_code = run_batch_job(args.script_file)
          exit(exit_code)
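With the new size parameter, the launcher (workbench/scripts/ml_pipeline_launcher.py, per the RECORD changes below) selects a pre-registered job definition named workbench-ml-pipeline-{size} instead of registering one on the fly; note that main() still calls run_batch_job with the default "small" tier. A hedged usage sketch, assuming the three job definitions and the workbench-job-queue already exist in the account and that the module is importable as shown ("my_pipeline.py" is a placeholder):

    # Illustrative only; paths and tier choice are placeholders.
    from workbench.scripts.ml_pipeline_launcher import run_batch_job

    exit_code = run_batch_job("my_pipeline.py", size="medium")  # 4 vCPU / 8GB tier
    print(f"Batch job finished (or disconnected) with exit code {exit_code}")
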
@@ -4,8 +4,10 @@ import sys
  import time
  import argparse
  from datetime import datetime, timedelta, timezone
- from workbench.core.cloud_platform.aws.aws_account_clamp import AWSAccountClamp
+
+ # Workbench Imports
  from workbench.utils.repl_utils import cprint, Spinner
+ from workbench.utils.cloudwatch_utils import get_cloudwatch_client, get_active_log_streams, stream_log_events

  # Define the log levels to include all log levels above the specified level
  log_level_map = {
@@ -33,64 +35,6 @@ def date_display(dt):
      return dt.strftime("%Y-%m-%d %I:%M%p") + "(UTC)"


- def get_cloudwatch_client():
-     """Get the CloudWatch Logs client using the Workbench assumed role session."""
-     session = AWSAccountClamp().boto3_session
-     return session.client("logs")
-
-
- def get_active_log_streams(client, log_group_name, start_time_ms, stream_filter=None):
-     """Retrieve log streams that have events after the specified start time."""
-
-     # Get all the streams in the log group
-     active_streams = []
-     stream_params = {
-         "logGroupName": log_group_name,
-         "orderBy": "LastEventTime",
-         "descending": True,
-     }
-
-     # Loop to retrieve all log streams (maximum 50 per call)
-     while True:
-         response = client.describe_log_streams(**stream_params)
-         log_streams = response.get("logStreams", [])
-
-         for log_stream in log_streams:
-             log_stream_name = log_stream["logStreamName"]
-             last_event_timestamp = log_stream.get("lastEventTimestamp")
-
-             # Include streams with events since the specified start time
-             # Note: There's some issue where the last event timestamp is 'off'
-             # so we're going to add 60 minutes from the last event timestamp
-             last_event_timestamp += 60 * 60 * 1000
-             if last_event_timestamp >= start_time_ms:
-                 active_streams.append(log_stream_name)
-             else:
-                 break  # Stop if we reach streams older than the start time
-
-         # Check if there are more streams to retrieve
-         if "nextToken" in response:
-             stream_params["nextToken"] = response["nextToken"]
-         else:
-             break
-
-     # Sort and report the active log streams
-     active_streams.sort()
-     if active_streams:
-         print("Active log streams:", len(active_streams))
-
-     # Filter the active streams by a substring if provided
-     if stream_filter and active_streams:
-         print(f"Filtering active log streams by '{stream_filter}'...")
-         active_streams = [stream for stream in active_streams if stream_filter in stream]
-
-     for stream in active_streams:
-         print(f"\t - {stream}")
-
-     # Return the active log streams
-     return active_streams
-
-
  def get_latest_log_events(client, log_group_name, start_time, end_time=None, stream_filter=None):
      """Retrieve the latest log events from the active/filtered log streams in a CloudWatch Logs group."""

@@ -99,11 +43,15 @@ def get_latest_log_events(client, log_group_name, start_time, end_time=None, str
          get_latest_log_events.first_run = True

      log_events = []
-     start_time_ms = int(start_time.timestamp() * 1000)  # Convert start_time to milliseconds
+     start_time_ms = int(start_time.timestamp() * 1000)
+
+     # Use the util function to get active streams
+     active_streams = get_active_log_streams(log_group_name, start_time_ms, stream_filter, client)

-     # Get the active log streams with events since start_time
-     active_streams = get_active_log_streams(client, log_group_name, start_time_ms, stream_filter)
      if active_streams:
+         print(f"Active log streams: {len(active_streams)}")
+         for stream in active_streams:
+             print(f"\t - {stream}")
          print(f"Processing log events from {date_display(start_time)} on {len(active_streams)} active log streams...")
          get_latest_log_events.first_run = False
      else:
@@ -114,50 +62,22 @@ def get_latest_log_events(client, log_group_name, start_time, end_time=None, str
          print("Monitoring for new events...")
          return log_events

-     # Iterate over the active streams and fetch log events
+     # Use the util function to stream events from each log stream
      for log_stream_name in active_streams:
-         params = {
-             "logGroupName": log_group_name,
-             "logStreamName": log_stream_name,
-             "startTime": start_time_ms,  # Use start_time in milliseconds
-             "startFromHead": True,  # Start from the nearest event to start_time
-         }
-         next_event_token = None
-         if end_time is not None:
-             params["endTime"] = int(end_time.timestamp() * 1000)
-
-         # Process the log events from this log stream
          spinner = Spinner("lightpurple", f"Pulling events from {log_stream_name}:")
          spinner.start()
          log_stream_events = 0

-         # Get the log events for the active log stream
-         while True:
-             if next_event_token:
-                 params["nextToken"] = next_event_token
-                 params.pop("startTime", None)  # Remove startTime when using nextToken
+         # Stream events using the util function
+         for event in stream_log_events(
+             log_group_name, log_stream_name, start_time, end_time, follow=False, client=client
+         ):
+             log_stream_events += 1
+             log_events.append(event)

-             # Fetch the log events (this call takes a while: optimize if we can)
-             events_response = client.get_log_events(**params)
+         spinner.stop()
+         print(f"Processed {log_stream_events} events from {log_stream_name} (Total: {len(log_events)})")

-             events = events_response.get("events", [])
-             for event in events:
-                 event["logStreamName"] = log_stream_name
-
-             # Add the log stream events to our list of all log events
-             log_stream_events += len(events)
-             log_events.extend(events)
-
-             # Handle pagination for log events
-             next_event_token = events_response.get("nextForwardToken")
-
-             # Break the loop if there are no more events to fetch
-             if not next_event_token or next_event_token == params.get("nextToken"):
-                 spinner.stop()
-                 print(f"Processed {log_stream_events} events from {log_stream_name} (Total: {len(log_events)})")
-                 break
-
-     # Return the log events
      return log_events

@@ -206,6 +126,7 @@ def monitor_log_group(
      print(f"Monitoring log group: {log_group_name} from {date_display(start_time)}")
      print(f"Log levels: {log_levels}")
      print(f"Search terms: {search_terms}")
+
      while True:
          # Get the latest log events with stream filtering if provided
          all_log_events = get_latest_log_events(client, log_group_name, start_time, end_time, stream_filter)
@@ -218,7 +139,6 @@ def monitor_log_group(

              # Check the search terms
              if not search_terms or any(term in event["message"].lower() for term in search_terms):
-
                  # Calculate the start and end index for this match
                  start_index = max(i - before, 0)
                  end_index = min(i + after, len(all_log_events) - 1)
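The net effect of these hunks is that monitor_cloud_watch.py no longer builds its own boto3 Logs client or pagination loop; both now come from the new workbench.utils.cloudwatch_utils module shown next. Note the argument-order change for get_active_log_streams, where client is now an optional trailing parameter. A small sketch of the updated call shapes, based only on the signatures in this diff ("/aws/batch/job" is a placeholder log group):

    # Sketch of the new call shapes; not a definitive usage of the script itself.
    from datetime import datetime, timedelta, timezone
    from workbench.utils.cloudwatch_utils import get_cloudwatch_client, get_active_log_streams

    client = get_cloudwatch_client()
    start = datetime.now(timezone.utc) - timedelta(hours=1)
    start_ms = int(start.timestamp() * 1000)

    # Old: get_active_log_streams(client, log_group_name, start_time_ms, stream_filter)
    # New: client is optional and passed last
    streams = get_active_log_streams("/aws/batch/job", start_ms, stream_filter=None, client=client)
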
@@ -0,0 +1,137 @@
+ """AWS CloudWatch utility functions for Workbench."""
+
+ import time
+ import logging
+ from datetime import datetime, timezone
+ from typing import List, Optional, Dict, Generator
+ from urllib.parse import quote
+ from workbench.core.cloud_platform.aws.aws_account_clamp import AWSAccountClamp
+
+ log = logging.getLogger("workbench")
+
+
+ def get_cloudwatch_client():
+     """Get the CloudWatch Logs client using the Workbench assumed role session."""
+     session = AWSAccountClamp().boto3_session
+     return session.client("logs")
+
+
+ def get_cloudwatch_logs_url(log_group: str, log_stream: str) -> Optional[str]:
+     """
+     Generate CloudWatch logs URL for the specified log group and stream.
+
+     Args:
+         log_group: Log group name (e.g., '/aws/batch/job')
+         log_stream: Log stream name
+
+     Returns:
+         CloudWatch console URL or None if unable to generate
+     """
+     try:
+         region = AWSAccountClamp().region
+
+         # URL encode the log group and stream
+         encoded_group = quote(log_group, safe="")
+         encoded_stream = quote(log_stream, safe="")
+
+         return (
+             f"https://{region}.console.aws.amazon.com/cloudwatch/home?"
+             f"region={region}#logsV2:log-groups/log-group/{encoded_group}"
+             f"/log-events/{encoded_stream}"
+         )
+     except Exception as e:  # noqa: BLE001
+         log.warning(f"Failed to generate CloudWatch logs URL: {e}")
+         return None
+
+
+ def get_active_log_streams(
+     log_group_name: str, start_time_ms: int, stream_filter: Optional[str] = None, client=None
+ ) -> List[str]:
+     """Retrieve log streams that have events after the specified start time."""
+     if not client:
+         client = get_cloudwatch_client()
+     active_streams = []
+     stream_params = {
+         "logGroupName": log_group_name,
+         "orderBy": "LastEventTime",
+         "descending": True,
+     }
+     while True:
+         response = client.describe_log_streams(**stream_params)
+         log_streams = response.get("logStreams", [])
+         for log_stream in log_streams:
+             log_stream_name = log_stream["logStreamName"]
+             last_event_timestamp = log_stream.get("lastEventTimestamp", 0)
+             if last_event_timestamp >= start_time_ms:
+                 active_streams.append(log_stream_name)
+             else:
+                 break
+         if "nextToken" in response:
+             stream_params["nextToken"] = response["nextToken"]
+         else:
+             break
+     # Sort and filter streams
+     active_streams.sort()
+     if stream_filter and active_streams:
+         active_streams = [stream for stream in active_streams if stream_filter in stream]
+     return active_streams
+
+
+ def stream_log_events(
+     log_group_name: str,
+     log_stream_name: str,
+     start_time: Optional[datetime] = None,
+     end_time: Optional[datetime] = None,
+     follow: bool = False,
+     client=None,
+ ) -> Generator[Dict, None, None]:
+     """
+     Stream log events from a specific log stream.
+     Yields:
+         Log events as dictionaries
+     """
+     if not client:
+         client = get_cloudwatch_client()
+     params = {"logGroupName": log_group_name, "logStreamName": log_stream_name, "startFromHead": True}
+     if start_time:
+         params["startTime"] = int(start_time.timestamp() * 1000)
+     if end_time:
+         params["endTime"] = int(end_time.timestamp() * 1000)
+     next_token = None
+     while True:
+         if next_token:
+             params["nextToken"] = next_token
+             params.pop("startTime", None)
+         try:
+             response = client.get_log_events(**params)
+             events = response.get("events", [])
+             for event in events:
+                 event["logStreamName"] = log_stream_name
+                 yield event
+             next_token = response.get("nextForwardToken")
+             # Break if no more events or same token
+             if not next_token or next_token == params.get("nextToken"):
+                 if not follow:
+                     break
+                 time.sleep(2)
+         except client.exceptions.ResourceNotFoundException:
+             if not follow:
+                 break
+             time.sleep(2)
+
+
+ def print_log_event(
+     event: dict, show_stream: bool = True, local_time: bool = True, custom_format: Optional[str] = None
+ ):
+     """Print a formatted log event."""
+     timestamp = datetime.fromtimestamp(event["timestamp"] / 1000, tz=timezone.utc)
+     if local_time:
+         timestamp = timestamp.astimezone()
+     message = event["message"].rstrip()
+     if custom_format:
+         # Allow custom formatting
+         print(custom_format.format(stream=event.get("logStreamName", ""), time=timestamp, message=message))
+     elif show_stream and "logStreamName" in event:
+         print(f"[{event['logStreamName']}] [{timestamp:%Y-%m-%d %I:%M%p}] {message}")
+     else:
+         print(f"[{timestamp:%H:%M:%S}] {message}")
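Taken together, the new module lets callers discover streams, pull events, and print them without touching boto3 directly. A hedged end-to-end sketch built only from the signatures above ("/aws/batch/job" is a placeholder log group; stream_log_events with follow=False stops once it reaches the newest event):

    # Illustrative only; the log group and time window are placeholders.
    from datetime import datetime, timedelta, timezone
    from workbench.utils.cloudwatch_utils import (
        get_active_log_streams,
        stream_log_events,
        print_log_event,
        get_cloudwatch_logs_url,
    )

    start = datetime.now(timezone.utc) - timedelta(minutes=30)
    start_ms = int(start.timestamp() * 1000)

    for stream in get_active_log_streams("/aws/batch/job", start_ms):
        print(get_cloudwatch_logs_url("/aws/batch/job", stream))  # console deep link (or None)
        for event in stream_log_events("/aws/batch/job", stream, start_time=start):
            print_log_event(event)
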
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: workbench
- Version: 0.8.167
+ Version: 0.8.169
  Summary: Workbench: A Dashboard and Python API for creating and deploying AWS SageMaker Model Pipelines
  Author-email: SuperCowPowers LLC <support@supercowpowers.com>
  License-Expression: MIT
@@ -167,8 +167,8 @@ workbench/resources/open_source_api.key,sha256=3S0OTblsmC0msUPdE_dbBmI83xJNmYscu
  workbench/resources/signature_verify_pub.pem,sha256=V3-u-3_z2PH-805ybkKvzDOBwAbvHxcKn0jLBImEtzM,272
  workbench/scripts/check_double_bond_stereo.py,sha256=p5hnL54Weq77ES0HCELq9JeoM-PyUGkvVSeWYF2dKyo,7776
  workbench/scripts/glue_launcher.py,sha256=bIKQvfGxpAhzbeNvTnHfRW_5kQhY-169_868ZnCejJk,10692
- workbench/scripts/ml_pipeline_launcher.py,sha256=AJF7An1fMdyj3SNPHnGE__NuR89vtPeszqGw6WInGt4,3712
- workbench/scripts/monitor_cloud_watch.py,sha256=5QODOSVmfunf6L-gtK1dhW93z9ZbMy2UEsuyR2tij5E,12463
+ workbench/scripts/ml_pipeline_launcher.py,sha256=fjI35SXi9CDSQ6Lan7qGcLAHkVCDioyhbPlo0eDHDxQ,4913
+ workbench/scripts/monitor_cloud_watch.py,sha256=s7MY4bsHts0nup9G0lWESCvgJZ9Mw1Eo-c8aKRgLjMw,9235
  workbench/scripts/redis_expire.py,sha256=DxI_RKSNlrW2BsJZXcsSbaWGBgPZdPhtzHjV9SUtElE,1120
  workbench/scripts/redis_report.py,sha256=iaJSuGPyLCs6e0TMcZDoT0YyJ43xJ1u74YD8FLnnUg4,990
  workbench/scripts/show_config.py,sha256=ff2wIKIlOktoitcrhk2r2B4I4N_ynXkEHB11l5nn0nA,548
@@ -199,6 +199,7 @@ workbench/utils/bulk_utils.py,sha256=s1lYN2Uk536MNGetekLYL_VL0N34hUjk1FX9BAz3Qu0
  workbench/utils/cache.py,sha256=0R5RXYEz_XHARK3anmQC4VRMawMks_cJ8S4vwC2roAE,5524
  workbench/utils/chem_utils.py,sha256=tLTAvLKTOiYSzbVQF0M8V5-ej36IVgr21CNB2vVJjYQ,56780
  workbench/utils/cloudwatch_handler.py,sha256=t0L280Qa1nMq95dwnf8lB5g8FHrQAyGY5S4JwP3yIa8,5165
+ workbench/utils/cloudwatch_utils.py,sha256=wXSqKcJlSnHyC0D6d4RsH8wwmx_0CsffcetUgXlZ_78,4828
  workbench/utils/color_utils.py,sha256=TmDGLK44t975lkfjt_1O-ee02QxrKfke7vPuXb-V-Uo,11779
  workbench/utils/config_manager.py,sha256=SBBmO1RGCQ_Zyh91tDxL1HOm5B0v38ImlLnOsEKzXPU,17649
  workbench/utils/dashboard_metrics.py,sha256=cNFI0GIAjd_IiDzM1oebsJ2QkRZuW068W_66ZC3J100,7398
@@ -275,9 +276,9 @@ workbench/web_interface/page_views/main_page.py,sha256=X4-KyGTKLAdxR-Zk2niuLJB2Y
  workbench/web_interface/page_views/models_page_view.py,sha256=M0bdC7bAzLyIaE2jviY12FF4abdMFZmg6sFuOY_LaGI,2650
  workbench/web_interface/page_views/page_view.py,sha256=Gh6YnpOGlUejx-bHZAf5pzqoQ1H1R0OSwOpGhOBO06w,455
  workbench/web_interface/page_views/pipelines_page_view.py,sha256=v2pxrIbsHBcYiblfius3JK766NZ7ciD2yPx0t3E5IJo,2656
- workbench-0.8.167.dist-info/licenses/LICENSE,sha256=z4QMMPlLJkZjU8VOKqJkZiQZCEZ--saIU2Z8-p3aVc0,1080
- workbench-0.8.167.dist-info/METADATA,sha256=RYkpkQ2sNwdHY4IPmFBH_3UcW-zKAZaonN2hVc_FTuc,9210
- workbench-0.8.167.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- workbench-0.8.167.dist-info/entry_points.txt,sha256=V_v6hQ4DYoCJnTnqbm036reCri_CXkA_ONcRSuF5OKg,305
- workbench-0.8.167.dist-info/top_level.txt,sha256=Dhy72zTxaA_o_yRkPZx5zw-fwumnjGaeGf0hBN3jc_w,10
- workbench-0.8.167.dist-info/RECORD,,
+ workbench-0.8.169.dist-info/licenses/LICENSE,sha256=z4QMMPlLJkZjU8VOKqJkZiQZCEZ--saIU2Z8-p3aVc0,1080
+ workbench-0.8.169.dist-info/METADATA,sha256=AoOujKSh6ueEHjNLcz8g5UMWZr5bGZrzk-ycBaw62n0,9210
+ workbench-0.8.169.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ workbench-0.8.169.dist-info/entry_points.txt,sha256=V_v6hQ4DYoCJnTnqbm036reCri_CXkA_ONcRSuF5OKg,305
+ workbench-0.8.169.dist-info/top_level.txt,sha256=Dhy72zTxaA_o_yRkPZx5zw-fwumnjGaeGf0hBN3jc_w,10
+ workbench-0.8.169.dist-info/RECORD,,