workbench 0.8.168__py3-none-any.whl → 0.8.170__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- workbench/api/model.py +3 -0
- workbench/core/artifacts/endpoint_core.py +16 -5
- workbench/core/artifacts/monitor_core.py +2 -2
- workbench/core/transforms/model_to_endpoint/model_to_endpoint.py +36 -6
- workbench/scripts/ml_pipeline_launcher.py +49 -51
- {workbench-0.8.168.dist-info → workbench-0.8.170.dist-info}/METADATA +1 -1
- {workbench-0.8.168.dist-info → workbench-0.8.170.dist-info}/RECORD +11 -11
- {workbench-0.8.168.dist-info → workbench-0.8.170.dist-info}/WHEEL +0 -0
- {workbench-0.8.168.dist-info → workbench-0.8.170.dist-info}/entry_points.txt +0 -0
- {workbench-0.8.168.dist-info → workbench-0.8.170.dist-info}/licenses/LICENSE +0 -0
- {workbench-0.8.168.dist-info → workbench-0.8.170.dist-info}/top_level.txt +0 -0
workbench/api/model.py
CHANGED
@@ -40,6 +40,7 @@ class Model(ModelCore):
         mem_size: int = 2048,
         max_concurrency: int = 5,
         instance: str = "ml.t2.medium",
+        data_capture: bool = False,
     ) -> Endpoint:
         """Create an Endpoint from the Model.
 
@@ -50,6 +51,7 @@ class Model(ModelCore):
             mem_size (int): The memory size for the Endpoint in MB (default: 2048)
             max_concurrency (int): The maximum concurrency for the Endpoint (default: 5)
             instance (str): The instance type to use for Realtime(serverless=False) Endpoints (default: "ml.t2.medium")
+            data_capture (bool): Enable data capture for the Endpoint (default: False)
 
         Returns:
             Endpoint: The Endpoint created from the Model
@@ -73,6 +75,7 @@ class Model(ModelCore):
         model_to_endpoint.transform(
             mem_size=mem_size,
             max_concurrency=max_concurrency,
+            data_capture=data_capture,
         )
 
         # Set the Endpoint Owner and Return the Endpoint
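For reference, the new flag is consumed from the public API roughly as sketched below (the model name is hypothetical, and the method is assumed to be Model.to_endpoint since its name is not visible in this hunk):

    from workbench.api.model import Model

    model = Model("abalone-regression")      # hypothetical model name
    endpoint = model.to_endpoint(
        serverless=False,                    # data capture targets realtime endpoints
        instance="ml.t2.medium",
        data_capture=True,                   # new in 0.8.170; captured requests/responses land under .../data_capture/
    )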
workbench/core/artifacts/endpoint_core.py
CHANGED
@@ -972,12 +972,23 @@ class EndpointCore(Artifact):
             cls.log.info(f"Deleting Monitoring Schedule {schedule['MonitoringScheduleName']}...")
             cls.sm_client.delete_monitoring_schedule(MonitoringScheduleName=schedule["MonitoringScheduleName"])
 
-        # Recursively delete all endpoint S3 artifacts (inference,
+        # Recursively delete all endpoint S3 artifacts (inference, etc)
+        # Note: We do not want to delete the data_capture/ files since these
+        # might be used for collection and data drift analysis
         base_endpoint_path = f"{cls.endpoints_s3_path}/{endpoint_name}"
-
-
-
-
+        all_s3_objects = wr.s3.list_objects(base_endpoint_path, boto3_session=cls.boto3_session)
+
+        # Filter out objects that contain 'data_capture/' in their path
+        s3_objects_to_delete = [obj for obj in all_s3_objects if "/data_capture/" not in obj]
+        cls.log.info(f"Found {len(all_s3_objects)} total objects at {base_endpoint_path}")
+        cls.log.info(f"Filtering out data_capture files, will delete {len(s3_objects_to_delete)} objects...")
+        cls.log.info(f"Objects to delete: {s3_objects_to_delete}")
+
+        if s3_objects_to_delete:
+            wr.s3.delete_objects(s3_objects_to_delete, boto3_session=cls.boto3_session)
+            cls.log.info(f"Successfully deleted {len(s3_objects_to_delete)} objects")
+        else:
+            cls.log.info("No objects to delete (only data_capture files found)")
 
         # Delete any dataframes that were stored in the Dataframe Cache
         cls.log.info("Deleting Dataframe Cache...")
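The filter above keys on the "/data_capture/" path segment; a small illustration with hypothetical S3 object keys:

    all_s3_objects = [
        "s3://my-bucket/endpoints/my-endpoint/inference/predictions.csv",
        "s3://my-bucket/endpoints/my-endpoint/data_capture/2025/01/01/capture.jsonl",
    ]
    # Same list comprehension as in the deletion code above: capture files are kept
    s3_objects_to_delete = [obj for obj in all_s3_objects if "/data_capture/" not in obj]
    assert s3_objects_to_delete == ["s3://my-bucket/endpoints/my-endpoint/inference/predictions.csv"]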
workbench/core/artifacts/monitor_core.py
CHANGED
@@ -186,11 +186,11 @@ class MonitorCore:
 
         # Log the data capture operation
         self.log.important(f"Enabling Data Capture for {self.endpoint_name} --> {self.data_capture_path}")
-        self.log.important("This
+        self.log.important("This will redeploy the endpoint...")
 
         # Create and apply the data capture configuration
         data_capture_config = DataCaptureConfig(
-            enable_capture=True,
+            enable_capture=True,
             sampling_percentage=capture_percentage,
             destination_s3_uri=self.data_capture_path,
         )
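For context, applying a DataCaptureConfig to an already-deployed endpoint is typically done through the SageMaker SDK's Predictor, roughly as sketched below (endpoint name and S3 path are hypothetical; this is the generic SDK pattern, not necessarily the exact Workbench call, which is outside this hunk):

    from sagemaker.model_monitor import DataCaptureConfig
    from sagemaker.predictor import Predictor

    predictor = Predictor(endpoint_name="my-endpoint")
    capture = DataCaptureConfig(
        enable_capture=True,
        sampling_percentage=100,
        destination_s3_uri="s3://my-bucket/endpoints/my-endpoint/data_capture",
    )
    # Updating the capture config triggers an endpoint update (the "redeploy" noted above)
    predictor.update_data_capture_config(data_capture_config=capture)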
workbench/core/transforms/model_to_endpoint/model_to_endpoint.py
CHANGED
@@ -5,6 +5,7 @@ from sagemaker import ModelPackage
 from sagemaker.serializers import CSVSerializer
 from sagemaker.deserializers import CSVDeserializer
 from sagemaker.serverless import ServerlessInferenceConfig
+from sagemaker.model_monitor import DataCaptureConfig
 
 # Local Imports
 from workbench.core.transforms.transform import Transform, TransformInput, TransformOutput
@@ -51,27 +52,38 @@ class ModelToEndpoint(Transform):
         EndpointCore.managed_delete(self.output_name)
 
         # Get the Model Package ARN for our input model
-
-        model_package_arn = input_model.model_package_arn()
+        workbench_model = ModelCore(self.input_name)
 
         # Deploy the model
-        self._deploy_model(
+        self._deploy_model(workbench_model, **kwargs)
 
         # Add this endpoint to the set of registered endpoints for the model
-
+        workbench_model.register_endpoint(self.output_name)
 
         # This ensures that the endpoint is ready for use
         time.sleep(5)  # We wait for AWS Lag
         end = EndpointCore(self.output_name)
         self.log.important(f"Endpoint {end.name} is ready for use")
 
-    def _deploy_model(
+    def _deploy_model(
+        self,
+        workbench_model: ModelCore,
+        mem_size: int = 2048,
+        max_concurrency: int = 5,
+        data_capture: bool = False,
+        capture_percentage: int = 100,
+    ):
         """Internal Method: Deploy the Model
 
         Args:
-
+            workbench_model(ModelCore): The Workbench ModelCore object to deploy
+            mem_size(int): Memory size for serverless deployment
+            max_concurrency(int): Max concurrency for serverless deployment
+            data_capture(bool): Enable data capture during deployment
+            capture_percentage(int): Percentage of data to capture. Defaults to 100.
         """
         # Grab the specified Model Package
+        model_package_arn = workbench_model.model_package_arn()
         model_package = ModelPackage(
             role=self.workbench_role_arn,
             model_package_arn=model_package_arn,
@@ -95,6 +107,23 @@ class ModelToEndpoint(Transform):
                 max_concurrency=max_concurrency,
             )
 
+        # Configure data capture if requested (and not serverless)
+        data_capture_config = None
+        if data_capture and not self.serverless:
+            # Set up the S3 path for data capture
+            base_endpoint_path = f"{workbench_model.endpoints_s3_path}/{self.output_name}"
+            data_capture_path = f"{base_endpoint_path}/data_capture"
+            self.log.important(f"Configuring Data Capture --> {data_capture_path}")
+            data_capture_config = DataCaptureConfig(
+                enable_capture=True,
+                sampling_percentage=capture_percentage,
+                destination_s3_uri=data_capture_path,
+            )
+        elif data_capture and self.serverless:
+            self.log.warning(
+                "Data capture is not supported for serverless endpoints. Skipping data capture configuration."
+            )
+
         # Deploy the Endpoint
         self.log.important(f"Deploying the Endpoint {self.output_name}...")
         model_package.deploy(
@@ -104,6 +133,7 @@ class ModelToEndpoint(Transform):
             endpoint_name=self.output_name,
             serializer=CSVSerializer(),
             deserializer=CSVDeserializer(),
+            data_capture_config=data_capture_config,
             tags=aws_tags,
         )
 
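Put together, the deployment path above amounts to the standard SageMaker SDK pattern sketched here (bucket, role, and endpoint names are hypothetical; Workbench wires in its own paths, serializers, and tags):

    from sagemaker import ModelPackage
    from sagemaker.model_monitor import DataCaptureConfig

    capture = DataCaptureConfig(
        enable_capture=True,
        sampling_percentage=100,
        destination_s3_uri="s3://my-bucket/endpoints/my-endpoint/data_capture",
    )
    model_package = ModelPackage(
        role="arn:aws:iam::123456789012:role/MyWorkbenchRole",
        model_package_arn="arn:aws:sagemaker:...",   # elided, supplied by the model registry
    )
    model_package.deploy(
        initial_instance_count=1,
        instance_type="ml.t2.medium",
        endpoint_name="my-endpoint",
        data_capture_config=capture,   # only honored for realtime (non-serverless) endpoints
    )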
workbench/scripts/ml_pipeline_launcher.py
CHANGED
@@ -27,60 +27,56 @@ def get_batch_role_arn() -> str:
     return f"arn:aws:iam::{account_id}:role/Workbench-BatchRole"
 
 
-def
-    """
-
-
-
-
-
-
-
-
-
-
-
-
-            {"name": "PYTHONUNBUFFERED", "value": "1"},
-        ],
-        # "networkConfiguration": {"assignPublicIp": "ENABLED"},  # Required for ECR Image Pull (when not in VPC)
-        },
-        timeout={"attemptDurationSeconds": 10800},  # 3 hours
-    )
-    log.info(f"Job definition ready: {name} (revision {response['revision']})")
-    return name
+def _log_cloudwatch_link(job: dict, message_prefix: str = "View logs") -> None:
+    """
+    Helper method to log CloudWatch logs link with clickable URL and full URL display.
+
+    Args:
+        job: Batch job description dictionary
+        message_prefix: Prefix for the log message (default: "View logs")
+    """
+    log_stream = job.get("container", {}).get("logStreamName")
+    logs_url = get_cloudwatch_logs_url(log_group="/aws/batch/job", log_stream=log_stream)
+    if logs_url:
+        clickable_url = f"\033]8;;{logs_url}\033\\{logs_url}\033]8;;\033\\"
+        log.info(f"{message_prefix}: {clickable_url}")
+    else:
+        log.info("Check AWS Batch console for logs")
 
 
-def run_batch_job(script_path: str) -> int:
+def run_batch_job(script_path: str, size: str = "small") -> int:
     """
     Submit and monitor an AWS Batch job for ML pipeline execution.
-
-
-    2. Submits a Batch job to run the script in a container
-    3. Monitors job status until completion
-    4. Returns the job's exit code
+
+    Uploads script to S3, submits Batch job, monitors until completion or 2 minutes of RUNNING.
 
     Args:
         script_path: Local path to the ML pipeline script
+        size: Job size tier - "small" (default), "medium", or "large"
+            - small: 2 vCPU, 4GB RAM for lightweight processing
+            - medium: 4 vCPU, 8GB RAM for standard ML workloads
+            - large: 8 vCPU, 16GB RAM for heavy training/inference
 
     Returns:
-        Exit code
+        Exit code (0 for success/disconnected, non-zero for failure)
     """
+    if size not in ["small", "medium", "large"]:
+        raise ValueError(f"Invalid size '{size}'. Must be 'small', 'medium', or 'large'")
+
     batch = AWSAccountClamp().boto3_session.client("batch")
     script_name = Path(script_path).stem
 
-    # Upload script to S3
+    # Upload script to S3
     s3_path = f"s3://{workbench_bucket}/batch-jobs/{Path(script_path).name}"
     log.info(f"Uploading script to {s3_path}")
    upload_content_to_s3(Path(script_path).read_text(), s3_path)
 
-    # Submit
+    # Submit job
     job_name = f"workbench_{script_name}_{datetime.now():%Y%m%d_%H%M%S}"
     response = batch.submit_job(
         jobName=job_name,
         jobQueue="workbench-job-queue",
-        jobDefinition=
+        jobDefinition=f"workbench-ml-pipeline-{size}",
         containerOverrides={
             "environment": [
                 {"name": "ML_PIPELINE_S3_PATH", "value": s3_path},
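The clickable_url above uses the OSC 8 terminal hyperlink escape sequence; a minimal standalone illustration (the URL is arbitrary, and rendering depends on terminal support):

    # OSC 8 hyperlink: ESC ] 8 ; ; URL ESC \  link-text  ESC ] 8 ; ; ESC \
    url = "https://console.aws.amazon.com/cloudwatch/home"
    clickable = f"\033]8;;{url}\033\\{url}\033]8;;\033\\"
    print(f"View logs: {clickable}")  # renders as a clickable link in supporting terminals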
@@ -89,36 +85,38 @@ def run_batch_job(script_path: str) -> int:
         },
     )
     job_id = response["jobId"]
-    log.info(f"Submitted job: {job_name} ({job_id})")
+    log.info(f"Submitted job: {job_name} ({job_id}) using {size} tier")
 
-    # Monitor job
-    last_status = None
+    # Monitor job
+    last_status, running_start = None, None
     while True:
-        # Check job status
         job = batch.describe_jobs(jobs=[job_id])["jobs"][0]
         status = job["status"]
+
         if status != last_status:
             log.info(f"Job status: {status}")
             last_status = status
+            if status == "RUNNING":
+                running_start = time.time()
+
+        # Disconnect after 2 minutes of running
+        if status == "RUNNING" and running_start and (time.time() - running_start >= 120):
+            log.info("✅ ML Pipeline is running successfully!")
+            _log_cloudwatch_link(job, "📊 Monitor logs")
+            return 0
 
-        #
+        # Handle completion
         if status in ["SUCCEEDED", "FAILED"]:
             exit_code = job.get("attempts", [{}])[-1].get("exitCode", 1)
-
-
-
-
-
-
-
-            logs_url = get_cloudwatch_logs_url(log_group="/aws/batch/job", log_stream=log_stream_name)
-            if logs_url:
-                # OSC 8 hyperlink format for modern terminals
-                clickable_url = f"\033]8;;{logs_url}\033\\{logs_url}\033]8;;\033\\"
-                log.info(f"View logs: {clickable_url}")
+            msg = (
+                "Job completed successfully"
+                if status == "SUCCEEDED"
+                else f"Job failed: {job.get('statusReason', 'Unknown')}"
+            )
+            log.info(msg) if status == "SUCCEEDED" else log.error(msg)
+            _log_cloudwatch_link(job)
             return exit_code
 
-        # Sleep a bit before next status check
         time.sleep(10)
 
 
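As a usage sketch (the script path and size are illustrative; the launcher's CLI entry point is not shown in this diff):

    from workbench.scripts.ml_pipeline_launcher import run_batch_job

    # Submit a pipeline script on the "medium" tier (4 vCPU, 8GB RAM per the docstring above)
    exit_code = run_batch_job("pipelines/train_abalone_model.py", size="medium")
    # Returns 0 once the job has been RUNNING for 2 minutes (or on SUCCEEDED); non-zero on failure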
{workbench-0.8.168.dist-info → workbench-0.8.170.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: workbench
-Version: 0.8.168
+Version: 0.8.170
 Summary: Workbench: A Dashboard and Python API for creating and deploying AWS SageMaker Model Pipelines
 Author-email: SuperCowPowers LLC <support@supercowpowers.com>
 License-Expression: MIT
{workbench-0.8.168.dist-info → workbench-0.8.170.dist-info}/RECORD
CHANGED
@@ -35,7 +35,7 @@ workbench/api/endpoint.py,sha256=RWGqxsCW_pMiENMb_XZlm2ZCldMS4suEBM3F5gT3hYI,381
 workbench/api/feature_set.py,sha256=wzNxNjN0K2FaIC7QUIogMnoHqw2vo0iAHYlGk6fWLCw,6649
 workbench/api/graph_store.py,sha256=LremJyPrQFgsHb7hxsctuCsoxx3p7TKtaY5qALHe6pc,4372
 workbench/api/meta.py,sha256=1_9989cPvf3hd3tA-83hLijOGNnhwXAF8aZF45adeDQ,8596
-workbench/api/model.py,sha256=
+workbench/api/model.py,sha256=RkFVXnlLcMlzNKRUFr_GCmZ7IQJMyhB2lwMwd22HBBo,4691
 workbench/api/monitor.py,sha256=kQHSFiVLRWnHekSdatMKR3QbRj1BBNrVXpZgvV83LPM,5027
 workbench/api/parameter_store.py,sha256=7BObkuATuP6C5AG_46kCWsmuCwuh1vgMJDBSN0gTkwM,4294
 workbench/api/pipeline.py,sha256=MSYGrDSXrRB_oQELtAlOwBfxSBTw3REAkHy5XBHau0Y,6261
@@ -53,10 +53,10 @@ workbench/core/artifacts/athena_source.py,sha256=RNmCe7s6uH4gVHpcdJcL84aSbF5Q1ah
 workbench/core/artifacts/cached_artifact_mixin.py,sha256=ngqFLZ4cQx_TFouXZgXZQsv_7W6XCvxVGXXSfzzaft8,3775
 workbench/core/artifacts/data_source_abstract.py,sha256=5IRCzFVK-17cd4NXPMRfx99vQAmQ0WHE5jcm5RfsVTg,10619
 workbench/core/artifacts/data_source_factory.py,sha256=YL_tA5fsgubbB3dPF6T4tO0rGgz-6oo3ge4i_YXVC-M,2380
-workbench/core/artifacts/endpoint_core.py,sha256=
+workbench/core/artifacts/endpoint_core.py,sha256=CtLo_eqONpDvyyYtjRkrrujVVv6cJV-vRQxHef6MYdk,48841
 workbench/core/artifacts/feature_set_core.py,sha256=055VdSYR09HP4ygAuYvIYtHQ7Ec4XxsZygpgEl5H5jQ,29136
 workbench/core/artifacts/model_core.py,sha256=U0dSkpZMrsIgbUglVkPwAgN0gji7Oa7glOjqMQJDAzE,50927
-workbench/core/artifacts/monitor_core.py,sha256=
+workbench/core/artifacts/monitor_core.py,sha256=BvJ8gMxZXYZeMzAC25PVTXWiyXKtxD1qK6LqDcIByzs,37657
 workbench/core/cloud_platform/cloud_meta.py,sha256=-g4-LTC3D0PXb3VfaXdLR1ERijKuHdffeMK_zhD-koQ,8809
 workbench/core/cloud_platform/aws/README.md,sha256=QT5IQXoUHbIA0qQ2wO6_2P2lYjYQFVYuezc22mWY4i8,97
 workbench/core/cloud_platform/aws/aws_account_clamp.py,sha256=OzFknZXKW7VTvnDGGX4BXKoh0i1gQ7yaEBhkLCyHFSs,6310
@@ -103,7 +103,7 @@ workbench/core/transforms/features_to_features/heavy/glue/Readme.md,sha256=TuyCa
 workbench/core/transforms/features_to_model/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 workbench/core/transforms/features_to_model/features_to_model.py,sha256=gwqdQZJUIfZv1M7uGhzzBxUwRS0thJE_o_H2IUsBT40,19789
 workbench/core/transforms/model_to_endpoint/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-workbench/core/transforms/model_to_endpoint/model_to_endpoint.py,sha256=
+workbench/core/transforms/model_to_endpoint/model_to_endpoint.py,sha256=TIYXvuK0s383PwJ4iS6fCRhuif6oIxsoWb4CpMGJjY4,6358
 workbench/core/transforms/pandas_transforms/__init__.py,sha256=xL4MT8-fZ1SFqDbTLc8XyxjupHtB1YR6Ej0AC2nwd7I,894
 workbench/core/transforms/pandas_transforms/data_to_pandas.py,sha256=sJHPeuNF8Q8aQqgRnkdWkyvur5cbggdUVIwR-xF3Dlo,3621
 workbench/core/transforms/pandas_transforms/features_to_pandas.py,sha256=af6xdPt2V4zhh-SzQa_UYxdmNMzMLXbrbsznV5QoIJg,3441
@@ -167,7 +167,7 @@ workbench/resources/open_source_api.key,sha256=3S0OTblsmC0msUPdE_dbBmI83xJNmYscu
 workbench/resources/signature_verify_pub.pem,sha256=V3-u-3_z2PH-805ybkKvzDOBwAbvHxcKn0jLBImEtzM,272
 workbench/scripts/check_double_bond_stereo.py,sha256=p5hnL54Weq77ES0HCELq9JeoM-PyUGkvVSeWYF2dKyo,7776
 workbench/scripts/glue_launcher.py,sha256=bIKQvfGxpAhzbeNvTnHfRW_5kQhY-169_868ZnCejJk,10692
-workbench/scripts/ml_pipeline_launcher.py,sha256=
+workbench/scripts/ml_pipeline_launcher.py,sha256=fjI35SXi9CDSQ6Lan7qGcLAHkVCDioyhbPlo0eDHDxQ,4913
 workbench/scripts/monitor_cloud_watch.py,sha256=s7MY4bsHts0nup9G0lWESCvgJZ9Mw1Eo-c8aKRgLjMw,9235
 workbench/scripts/redis_expire.py,sha256=DxI_RKSNlrW2BsJZXcsSbaWGBgPZdPhtzHjV9SUtElE,1120
 workbench/scripts/redis_report.py,sha256=iaJSuGPyLCs6e0TMcZDoT0YyJ43xJ1u74YD8FLnnUg4,990
@@ -276,9 +276,9 @@ workbench/web_interface/page_views/main_page.py,sha256=X4-KyGTKLAdxR-Zk2niuLJB2Y
 workbench/web_interface/page_views/models_page_view.py,sha256=M0bdC7bAzLyIaE2jviY12FF4abdMFZmg6sFuOY_LaGI,2650
 workbench/web_interface/page_views/page_view.py,sha256=Gh6YnpOGlUejx-bHZAf5pzqoQ1H1R0OSwOpGhOBO06w,455
 workbench/web_interface/page_views/pipelines_page_view.py,sha256=v2pxrIbsHBcYiblfius3JK766NZ7ciD2yPx0t3E5IJo,2656
-workbench-0.8.
-workbench-0.8.
-workbench-0.8.
-workbench-0.8.
-workbench-0.8.
-workbench-0.8.
+workbench-0.8.170.dist-info/licenses/LICENSE,sha256=z4QMMPlLJkZjU8VOKqJkZiQZCEZ--saIU2Z8-p3aVc0,1080
+workbench-0.8.170.dist-info/METADATA,sha256=GbS745jAMPDykgLqfLcwjb9nRnczT-uV9Q11GbPBAX8,9210
+workbench-0.8.170.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+workbench-0.8.170.dist-info/entry_points.txt,sha256=V_v6hQ4DYoCJnTnqbm036reCri_CXkA_ONcRSuF5OKg,305
+workbench-0.8.170.dist-info/top_level.txt,sha256=Dhy72zTxaA_o_yRkPZx5zw-fwumnjGaeGf0hBN3jc_w,10
+workbench-0.8.170.dist-info/RECORD,,
{workbench-0.8.168.dist-info → workbench-0.8.170.dist-info}/WHEEL
File without changes
{workbench-0.8.168.dist-info → workbench-0.8.170.dist-info}/entry_points.txt
File without changes
{workbench-0.8.168.dist-info → workbench-0.8.170.dist-info}/licenses/LICENSE
File without changes
{workbench-0.8.168.dist-info → workbench-0.8.170.dist-info}/top_level.txt
File without changes