workbench 0.8.160__py3-none-any.whl → 0.8.162__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- workbench/api/feature_set.py +1 -0
- workbench/api/parameter_store.py +5 -0
- workbench/core/artifacts/model_core.py +3 -3
- workbench/core/transforms/features_to_model/features_to_model.py +3 -1
- workbench/model_scripts/pytorch_model/generated_model_script.py +57 -38
- workbench/model_scripts/pytorch_model/pytorch.template +54 -26
- workbench/model_scripts/script_generation.py +2 -1
- {workbench-0.8.160.dist-info → workbench-0.8.162.dist-info}/METADATA +1 -1
- {workbench-0.8.160.dist-info → workbench-0.8.162.dist-info}/RECORD +13 -13
- {workbench-0.8.160.dist-info → workbench-0.8.162.dist-info}/WHEEL +0 -0
- {workbench-0.8.160.dist-info → workbench-0.8.162.dist-info}/entry_points.txt +0 -0
- {workbench-0.8.160.dist-info → workbench-0.8.162.dist-info}/licenses/LICENSE +0 -0
- {workbench-0.8.160.dist-info → workbench-0.8.162.dist-info}/top_level.txt +0 -0
workbench/api/feature_set.py
CHANGED
@@ -108,6 +108,7 @@ class FeatureSet(FeatureSetCore):
             training_image (str, optional): The training image to use (default: "xgb_training")
             inference_image (str, optional): The inference image to use (default: "xgb_inference")
             inference_arch (str, optional): The architecture to use for inference (default: "x86_64")
+            kwargs (dict, optional): Additional keyword arguments to pass to the model

         Returns:
             Model: The Model created from the FeatureSet (or None if the Model could not be created)
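For orientation, a minimal sketch of how the new kwargs might be supplied from the API. The method name to_model and the FeatureSet name are illustrative assumptions (not confirmed by this diff); the hyperparameters shape mirrors the TEMPLATE_PARAMS example further down:

    from workbench.api.feature_set import FeatureSet

    fs = FeatureSet("aqsol_features")              # hypothetical FeatureSet name
    model = fs.to_model(                           # assumed FeatureSet -> Model method
        name="aqsol-pytorch-class",
        hyperparameters={                          # forwarded via **kwargs (this release)
            "training_config": {"max_epochs": 150},
            "model_config": {"layers": "256-128-64"},
        },
    )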
workbench/api/parameter_store.py
CHANGED
@@ -133,3 +133,8 @@ if __name__ == "__main__":
     # Out of scope tests
     param_store.upsert("test", "value")
     param_store.delete("test")
+
+    # Recursive delete test
+    param_store.upsert("/workbench/test/test1", "value1")
+    param_store.upsert("/workbench/test/test2", "value2")
+    param_store.delete_recursive("workbench/test/")
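Note the new test upserts keys with a leading slash but deletes with the prefix "workbench/test/", which implies ParameterStore normalizes prefixes. A standalone sketch of that recursive (prefix) delete semantic, using a plain dict as a stand-in for the real store (assumed behavior, not the actual implementation):

    # Plain-dict stand-in; the real ParameterStore is backed by AWS SSM
    store = {
        "/workbench/test/test1": "value1",
        "/workbench/test/test2": "value2",
        "/workbench/other": "keep-me",
    }

    def delete_recursive(store: dict, prefix: str) -> None:
        prefix = "/" + prefix.lstrip("/")          # assumed leading-slash normalization
        for key in [k for k in store if k.startswith(prefix)]:
            del store[key]

    delete_recursive(store, "workbench/test/")
    print(store)                                   # {'/workbench/other': 'keep-me'}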
workbench/core/artifacts/model_core.py
CHANGED

@@ -926,15 +926,15 @@ class ModelCore(Artifact):
         wr.s3.delete_objects(s3_delete_path, boto3_session=cls.boto3_session)

         # Delete any dataframes that were stored in the Dataframe Cache
-        cls.log.info("Deleting Dataframe Cache...")
+        cls.log.info("Deleting Dataframe Cache Entries...")
         cls.df_cache.delete_recursive(model_group_name)

         # Delete any dataframes that were stored in the Dataframe Store
-        cls.log.info("Deleting Dataframe Store...")
+        cls.log.info("Deleting Dataframe Store Entries...")
         cls.df_store.delete_recursive(f"workbench/models/{model_group_name}")

         # Delete anything we might have stored in the Parameter Store
-        cls.log.info("Deleting Parameter Store...")
+        cls.log.info("Deleting Parameter Store Entries...")
         cls.param_store.delete_recursive(f"workbench/models/{model_group_name}")

     def _set_model_type(self, model_type: ModelType):
workbench/core/transforms/features_to_model/features_to_model.py
CHANGED

@@ -164,6 +164,7 @@ class FeaturesToModel(Transform):
             "model_metrics_s3_path": self.model_training_root,
             "train_all_data": train_all_data,
             "id_column": feature_set.id_column,
+            "hyperparameters": kwargs.get("hyperparameters", {}),
         }

         # Custom Script
@@ -280,11 +281,12 @@
         # Call the Model onboard method
         output_model.onboard_with_args(self.model_type, self.target_column, self.model_feature_list)

-    def create_and_register_model(self, aws_region=None):
+    def create_and_register_model(self, aws_region=None, **kwargs):
         """Create and Register the Model

         Args:
             aws_region (str, optional): AWS Region to use (default None)
+            **kwargs: Additional keyword arguments to pass to the model registration
         """

         # Get the metadata/tags to push into AWS
workbench/model_scripts/pytorch_model/generated_model_script.py
CHANGED

@@ -8,7 +8,7 @@ import numpy as np
 os.environ["TORCH_FORCE_NO_WEIGHTS_ONLY_LOAD"] = "1"
 from pytorch_tabular import TabularModel
 from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig
-from pytorch_tabular.models import CategoryEmbeddingModelConfig
+from pytorch_tabular.models import CategoryEmbeddingModelConfig

 # Model Performance Scores
 from sklearn.metrics import (
@@ -40,7 +40,8 @@ TEMPLATE_PARAMS = {
     "features": ['molwt', 'mollogp', 'molmr', 'heavyatomcount', 'numhacceptors', 'numhdonors', 'numheteroatoms', 'numrotatablebonds', 'numvalenceelectrons', 'numaromaticrings', 'numsaturatedrings', 'numaliphaticrings', 'ringcount', 'tpsa', 'labuteasa', 'balabanj', 'bertzct'],
     "compressed_features": [],
     "model_metrics_s3_path": "s3://sandbox-sageworks-artifacts/models/aqsol-pytorch-class/training",
-    "train_all_data": False
+    "train_all_data": False,
+    "hyperparameters": {'training_config': {'max_epochs': 150}, 'model_config': {'layers': '256-128-64'}}
 }

@@ -203,15 +204,6 @@ def decompress_features(
     return df, decompressed_features


-def model_fn(model_dir):
-    """Deserialize and return fitted PyTorch Tabular model"""
-    #
-    os.environ['TEMP'] = '/tmp'
-    model_path = os.path.join(model_dir, "tabular_model")
-    model = TabularModel.load_model(model_path)
-    return model
-
-
 def model_fn(model_dir):

     # Save current working directory
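The removed block was a duplicated definition and therefore dead code: in Python a second def with the same name simply rebinds it, so only the later model_fn was ever called. A tiny illustration of that shadowing:

    def model_fn(model_dir):
        return "first"

    def model_fn(model_dir):   # rebinds the name; the first definition is unreachable
        return "second"

    print(model_fn("/opt/ml/model"))   # -> second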
@@ -343,6 +335,7 @@ if __name__ == "__main__":
     model_type = TEMPLATE_PARAMS["model_type"]
     model_metrics_s3_path = TEMPLATE_PARAMS["model_metrics_s3_path"]
     train_all_data = TEMPLATE_PARAMS["train_all_data"]
+    hyperparameters = TEMPLATE_PARAMS["hyperparameters"]
     validation_split = 0.2

     # Script arguments for input/output directories
@@ -412,21 +405,7 @@
         categorical_cols=categorical_cols,
     )

-    trainer_config = TrainerConfig(
-        auto_lr_find=True,
-        batch_size=min(1024, len(df_train) // 4),
-        max_epochs=100,
-        early_stopping="valid_loss",
-        early_stopping_patience=15,
-        checkpoints="valid_loss",
-        accelerator="auto",
-        progress_bar="none",
-        gradient_clip_val=1.0,
-    )
-
-    optimizer_config = OptimizerConfig()
-
-    # Choose model configuration based on model type
+    # Choose the 'task' based on model type also set up the label encoder if needed
     if model_type == "classifier":
         task = "classification"
         # Encode the target column
@@ -437,30 +416,70 @@ if __name__ == "__main__":
         task = "regression"
         label_encoder = None

-    # Use
+    # Use any hyperparameters to set up both the trainer and model configurations
+    print(f"Hyperparameters: {hyperparameters}")
+
+    # Set up PyTorch Tabular configuration with defaults
+    trainer_defaults = {
+        "auto_lr_find": True,
+        "batch_size": min(1024, max(32, len(df_train) // 4)),
+        "max_epochs": 100,
+        "early_stopping": "valid_loss",
+        "early_stopping_patience": 15,
+        "checkpoints": "valid_loss",
+        "accelerator": "auto",
+        "progress_bar": "none",
+        "gradient_clip_val": 1.0,
+    }
+
+    # Override defaults with training_config if present
+    training_overrides = {k: v for k, v in hyperparameters.get('training_config', {}).items()
+                          if k in trainer_defaults}
+    # Print overwrites
+    for key, value in training_overrides.items():
+        print(f"TRAINING CONFIG Override: {key}: {trainer_defaults[key]} → {value}")
+    trainer_params = {**trainer_defaults, **training_overrides}
+    trainer_config = TrainerConfig(**trainer_params)
+
+    # Model config defaults
+    model_defaults = {
+        "layers": "1024-512-512",
+        "activation": "ReLU",
+        "learning_rate": 1e-3,
+        "dropout": 0.1,
+        "use_batch_norm": True,
+        "initialization": "kaiming",
+    }
+    # Override defaults with model_config if present
+    model_overrides = {k: v for k, v in hyperparameters.get('model_config', {}).items()
+                       if k in model_defaults}
+    # Print overwrites
+    for key, value in model_overrides.items():
+        print(f"MODEL CONFIG Override: {key}: {model_defaults[key]} → {value}")
+    model_params = {**model_defaults, **model_overrides}
+
+    # Use CategoryEmbedding model configuration for general-purpose tabular modeling.
+    # Works effectively for both regression and classification as the foundational
+    # architecture in PyTorch Tabular
     model_config = CategoryEmbeddingModelConfig(
         task=task,
-        layers="1024-512-512",
-        activation="ReLU",
-        learning_rate=1e-3,
-        dropout=0.1,
-        use_batch_norm=True,
-        initialization="kaiming",
+        **model_params
     )
+    optimizer_config = OptimizerConfig()

-
+    #####################################
+    # Create and train the TabularModel #
+    #####################################
     tabular_model = TabularModel(
         data_config=data_config,
         model_config=model_config,
         optimizer_config=optimizer_config,
         trainer_config=trainer_config,
     )
-
-    # Train the model
     tabular_model.fit(train=df_train, validation=df_val)

     # Make Predictions on the Validation Set
-    print(
+    print("Making Predictions on Validation Set...")
     result = tabular_model.predict(df_val, include_input_features=False)

     # pytorch-tabular returns predictions using f"{target}_prediction" column
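The override logic above is a plain dict merge that silently drops unknown keys, since the comprehension filters on membership in the defaults. A standalone sketch of the same pattern (values illustrative):

    trainer_defaults = {"max_epochs": 100, "batch_size": 1024}
    hyperparameters = {"training_config": {"max_epochs": 150, "not_a_knob": 1}}

    # Keep only keys that exist in the defaults, then merge (right side wins)
    overrides = {k: v for k, v in hyperparameters.get("training_config", {}).items()
                 if k in trainer_defaults}
    params = {**trainer_defaults, **overrides}
    print(params)   # {'max_epochs': 150, 'batch_size': 1024} -- 'not_a_knob' dropped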
@@ -554,4 +573,4 @@

     # Save the category mappings
     with open(os.path.join(args.model_dir, "category_mappings.json"), "w") as fp:
-        json.dump(category_mappings, fp)
+        json.dump(category_mappings, fp)
workbench/model_scripts/pytorch_model/pytorch.template
CHANGED

@@ -40,7 +40,8 @@ TEMPLATE_PARAMS = {
     "features": "{{feature_list}}",
     "compressed_features": "{{compressed_features}}",
     "model_metrics_s3_path": "{{model_metrics_s3_path}}",
-    "train_all_data": "{{train_all_data}}"
+    "train_all_data": "{{train_all_data}}",
+    "hyperparameters": "{{hyperparameters}}"
 }

@@ -334,6 +335,7 @@ if __name__ == "__main__":
     model_type = TEMPLATE_PARAMS["model_type"]
     model_metrics_s3_path = TEMPLATE_PARAMS["model_metrics_s3_path"]
     train_all_data = TEMPLATE_PARAMS["train_all_data"]
+    hyperparameters = TEMPLATE_PARAMS["hyperparameters"]
     validation_split = 0.2

     # Script arguments for input/output directories
@@ -403,21 +405,7 @@
         categorical_cols=categorical_cols,
     )

-    trainer_config = TrainerConfig(
-        auto_lr_find=True,
-        batch_size=min(1024, len(df_train) // 4),
-        max_epochs=100,
-        early_stopping="valid_loss",
-        early_stopping_patience=15,
-        checkpoints="valid_loss",
-        accelerator="auto",
-        progress_bar="none",
-        gradient_clip_val=1.0,
-    )
-
-    optimizer_config = OptimizerConfig()
-
-    # Choose model configuration based on model type
+    # Choose the 'task' based on model type also set up the label encoder if needed
     if model_type == "classifier":
         task = "classification"
         # Encode the target column
@@ -428,26 +416,66 @@ if __name__ == "__main__":
         task = "regression"
         label_encoder = None

-    # Use
+    # Use any hyperparameters to set up both the trainer and model configurations
+    print(f"Hyperparameters: {hyperparameters}")
+
+    # Set up PyTorch Tabular configuration with defaults
+    trainer_defaults = {
+        "auto_lr_find": True,
+        "batch_size": min(1024, max(32, len(df_train) // 4)),
+        "max_epochs": 100,
+        "early_stopping": "valid_loss",
+        "early_stopping_patience": 15,
+        "checkpoints": "valid_loss",
+        "accelerator": "auto",
+        "progress_bar": "none",
+        "gradient_clip_val": 1.0,
+    }
+
+    # Override defaults with training_config if present
+    training_overrides = {k: v for k, v in hyperparameters.get('training_config', {}).items()
+                          if k in trainer_defaults}
+    # Print overwrites
+    for key, value in training_overrides.items():
+        print(f"TRAINING CONFIG Override: {key}: {trainer_defaults[key]} → {value}")
+    trainer_params = {**trainer_defaults, **training_overrides}
+    trainer_config = TrainerConfig(**trainer_params)
+
+    # Model config defaults
+    model_defaults = {
+        "layers": "1024-512-512",
+        "activation": "ReLU",
+        "learning_rate": 1e-3,
+        "dropout": 0.1,
+        "use_batch_norm": True,
+        "initialization": "kaiming",
+    }
+    # Override defaults with model_config if present
+    model_overrides = {k: v for k, v in hyperparameters.get('model_config', {}).items()
+                       if k in model_defaults}
+    # Print overwrites
+    for key, value in model_overrides.items():
+        print(f"MODEL CONFIG Override: {key}: {model_defaults[key]} → {value}")
+    model_params = {**model_defaults, **model_overrides}
+
+    # Use CategoryEmbedding model configuration for general-purpose tabular modeling.
+    # Works effectively for both regression and classification as the foundational
+    # architecture in PyTorch Tabular
     model_config = CategoryEmbeddingModelConfig(
         task=task,
-        layers="1024-512-512",
-        activation="ReLU",
-        learning_rate=1e-3,
-        dropout=0.1,
-        use_batch_norm=True,
-        initialization="kaiming",
+        **model_params
     )
+    optimizer_config = OptimizerConfig()

-
+    #####################################
+    # Create and train the TabularModel #
+    #####################################
     tabular_model = TabularModel(
         data_config=data_config,
         model_config=model_config,
         optimizer_config=optimizer_config,
         trainer_config=trainer_config,
     )
-
-    # Train the model
     tabular_model.fit(train=df_train, validation=df_val)

     # Make Predictions on the Validation Set
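One behavioral change worth noting in both copies of this script: the default batch size gained a lower bound, min(1024, max(32, len(df_train) // 4)) instead of min(1024, len(df_train) // 4), so tiny training sets can no longer drive the batch size toward zero. A quick check of the arithmetic:

    for n in (3, 100, 10_000, 100_000):
        old = min(1024, n // 4)
        new = min(1024, max(32, n // 4))
        print(f"n={n}: old={old}, new={new}")
    # n=3:      old=0    new=32    (old value would be an invalid batch size)
    # n=100:    old=25   new=32
    # n=10000:  old=1024 new=1024  (upper cap unchanged for large sets)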
workbench/model_scripts/script_generation.py
CHANGED

@@ -68,7 +68,7 @@ def fill_template(template_path: str, params: dict, output_script: str) -> str:
         template = template.replace(placeholder, str(value))

     # Sanity check to ensure all placeholders were replaced
-    if "{{" in template
+    if "{{" in template and "}}" in template:
         msg = "Not all template placeholders were replaced. Please check your params."
         log.critical(msg)
         raise ValueError(msg)
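The tightened sanity check now requires both placeholder tokens before it raises, which matters with the new hyperparameters param: rendering a dict into the template legitimately produces "}}" (the closing braces of a nested dict) without any unreplaced "{{...}}" placeholder remaining. A sketch of what the new condition tolerates (the old condition is truncated in the diff above, so this only illustrates the new behavior):

    template = '"hyperparameters": "{{hyperparameters}}"'
    value = {"training_config": {"max_epochs": 150}}
    rendered = template.replace("{{hyperparameters}}", str(value))

    print(rendered)              # "hyperparameters": "{'training_config': {'max_epochs': 150}}"
    print("}}" in rendered)      # True  -- nested dict repr ends in '}}'
    print("{{" in rendered and "}}" in rendered)   # False -- no false positive raised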
@@ -93,6 +93,7 @@ def generate_model_script(template_params: dict) -> str:
         - feature_list (list[str]): A list of columns for the features
         - model_metrics_s3_path (str): The S3 path to store the model metrics
         - train_all_data (bool): Whether to train on all (100%) of the data
+        - hyperparameters (dict, optional): Hyperparameters for the model (default: None)

     Returns:
         str: The name of the generated model script
{workbench-0.8.160.dist-info → workbench-0.8.162.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: workbench
-Version: 0.8.160
+Version: 0.8.162
 Summary: Workbench: A Dashboard and Python API for creating and deploying AWS SageMaker Model Pipelines
 Author-email: SuperCowPowers LLC <support@supercowpowers.com>
 License-Expression: MIT
{workbench-0.8.160.dist-info → workbench-0.8.162.dist-info}/RECORD
CHANGED

@@ -32,12 +32,12 @@ workbench/api/compound.py,sha256=BHd3Qu4Ra45FEuwiowhFfGMI_HKRRB10XMmoS6ljKrM,254
 workbench/api/data_source.py,sha256=Ngz36YZWxFfpJbmURhM1LQPYjh5kdpZNGo6_fCRePbA,8321
 workbench/api/df_store.py,sha256=Wybb3zO-jPpAi2Ns8Ks1-lagvXAaBlRpBZHhnnl3Lms,6131
 workbench/api/endpoint.py,sha256=ejDnfBBgNYMZB-bOA5nX7C6CtBlAjmtrF8M_zpri9Io,3451
-workbench/api/feature_set.py,sha256=
+workbench/api/feature_set.py,sha256=wzNxNjN0K2FaIC7QUIogMnoHqw2vo0iAHYlGk6fWLCw,6649
 workbench/api/graph_store.py,sha256=LremJyPrQFgsHb7hxsctuCsoxx3p7TKtaY5qALHe6pc,4372
 workbench/api/meta.py,sha256=fCOtZMfAHWaerzcsTeFnimXfgV8STe9JDiB7QBogktc,8456
 workbench/api/model.py,sha256=2hPN8UK4whZ0kDgPtbR7lEknw7XhH5hGYaHA55jmZWQ,4529
 workbench/api/monitor.py,sha256=kQHSFiVLRWnHekSdatMKR3QbRj1BBNrVXpZgvV83LPM,5027
-workbench/api/parameter_store.py,sha256=
+workbench/api/parameter_store.py,sha256=7BObkuATuP6C5AG_46kCWsmuCwuh1vgMJDBSN0gTkwM,4294
 workbench/api/pipeline.py,sha256=MSYGrDSXrRB_oQELtAlOwBfxSBTw3REAkHy5XBHau0Y,6261
 workbench/cached/__init__.py,sha256=wvTyIFvusv2HjU3yop6OSr3js5_-SZuR8nPmlCuZQJ4,525
 workbench/cached/cached_data_source.py,sha256=A0o4H9g1aEms8HkOHWnb46vJ5fx6ebs1aCYaQcf8gPI,2649
@@ -55,7 +55,7 @@ workbench/core/artifacts/data_source_abstract.py,sha256=5IRCzFVK-17cd4NXPMRfx99v
 workbench/core/artifacts/data_source_factory.py,sha256=YL_tA5fsgubbB3dPF6T4tO0rGgz-6oo3ge4i_YXVC-M,2380
 workbench/core/artifacts/endpoint_core.py,sha256=L6uWOxHKItjbpRS2rFrAbxAqDyZIv2CO9dnZpohKrUI,48768
 workbench/core/artifacts/feature_set_core.py,sha256=055VdSYR09HP4ygAuYvIYtHQ7Ec4XxsZygpgEl5H5jQ,29136
-workbench/core/artifacts/model_core.py,sha256=
+workbench/core/artifacts/model_core.py,sha256=U0dSkpZMrsIgbUglVkPwAgN0gji7Oa7glOjqMQJDAzE,50927
 workbench/core/artifacts/monitor_core.py,sha256=BP6UuCyBI4zB2wwcIXvUw6RC0EktTcQd5Rv0x73qzio,37670
 workbench/core/cloud_platform/cloud_meta.py,sha256=-g4-LTC3D0PXb3VfaXdLR1ERijKuHdffeMK_zhD-koQ,8809
 workbench/core/cloud_platform/aws/README.md,sha256=QT5IQXoUHbIA0qQ2wO6_2P2lYjYQFVYuezc22mWY4i8,97
@@ -101,7 +101,7 @@ workbench/core/transforms/features_to_features/__init__.py,sha256=47DEQpj8HBSa-_
 workbench/core/transforms/features_to_features/heavy/emr/Readme.md,sha256=YtQgCEQeKe0CQXQkhzMTYq9xOtCsCYb5P5LW2BmRKWQ,68
 workbench/core/transforms/features_to_features/heavy/glue/Readme.md,sha256=TuyCatWfoDr99zUwvOcxf-TqMkQzaMqXlj5nmFcRzfo,48
 workbench/core/transforms/features_to_model/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-workbench/core/transforms/features_to_model/features_to_model.py,sha256=
+workbench/core/transforms/features_to_model/features_to_model.py,sha256=gwqdQZJUIfZv1M7uGhzzBxUwRS0thJE_o_H2IUsBT40,19789
 workbench/core/transforms/model_to_endpoint/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 workbench/core/transforms/model_to_endpoint/model_to_endpoint.py,sha256=hbLsdOVlfAH4XCVNUfr3SFH8rKjxIs0QyYrNwjCh7SM,4970
 workbench/core/transforms/pandas_transforms/__init__.py,sha256=xL4MT8-fZ1SFqDbTLc8XyxjupHtB1YR6Ej0AC2nwd7I,894
@@ -121,7 +121,7 @@ workbench/core/views/training_view.py,sha256=mUkv1oVhDG-896RdLNKxCg0j0yvudEcPnvL
 workbench/core/views/view.py,sha256=Ujzw6zLROP9oKfKm3zJwaOyfpyjh5uM9fAu1i3kUOig,11764
 workbench/core/views/view_utils.py,sha256=y0YuPW-90nAfgAD1UW_49-j7Mvncfm7-5rV8I_97CK8,12274
 workbench/core/views/storage/mdq_view.py,sha256=qf_ep1KwaXOIfO930laEwNIiCYP7VNOqjE3VdHfopRE,5195
-workbench/model_scripts/script_generation.py,sha256=
+workbench/model_scripts/script_generation.py,sha256=VVgRxvJsbpn6Uw6KpDAZe8oMtmg0l_fTImxDiUFJEes,7722
 workbench/model_scripts/custom_models/chem_info/Readme.md,sha256=mH1lxJ4Pb7F5nBnVXaiuxpi8zS_yjUw_LBJepVKXhlA,574
 workbench/model_scripts/custom_models/chem_info/local_utils.py,sha256=Rsz_VRoA3O3-VoitmN8o5OymstsF433QgdSRHc-iZ24,29071
 workbench/model_scripts/custom_models/chem_info/molecular_descriptors.py,sha256=E8SK4oOHaYnYx4ycQJ6R7yg799kjtbipM3KEc8SPArQ,3011
@@ -150,8 +150,8 @@ workbench/model_scripts/custom_script_example/requirements.txt,sha256=jWlGc7HH7v
 workbench/model_scripts/ensemble_xgb/ensemble_xgb.template,sha256=s8tPPk_q6UqA2nAzknD8viA-kN7f62Rim2XwMKcqHKc,10399
 workbench/model_scripts/ensemble_xgb/generated_model_script.py,sha256=dsjUGm22xI1ThGn97HPKtooyEPK-HOQnf5chnZ7-MXk,10675
 workbench/model_scripts/ensemble_xgb/requirements.txt,sha256=jWlGc7HH7vqyukTm38LN4EyDi8jDUPEay4n45z-30uc,104
-workbench/model_scripts/pytorch_model/generated_model_script.py,sha256=
-workbench/model_scripts/pytorch_model/pytorch.template,sha256=
+workbench/model_scripts/pytorch_model/generated_model_script.py,sha256=Mr1IMQJE_ML899qjzhjkrP521IjvcAvqU0pk--FB7KY,22356
+workbench/model_scripts/pytorch_model/pytorch.template,sha256=3jM3RUH68r75eH9Wayz6YTXZ7qpuDnaJCKKcHD_oKqA,22054
 workbench/model_scripts/pytorch_model/requirements.txt,sha256=ICS5nW0wix44EJO2tJszJSaUrSvhSfdedn6FcRInGx4,181
 workbench/model_scripts/quant_regression/quant_regression.template,sha256=AQihffV68qI6CG9qztA0jGunDWoijb3eeDWNG5tiIGc,9818
 workbench/model_scripts/quant_regression/requirements.txt,sha256=jWlGc7HH7vqyukTm38LN4EyDi8jDUPEay4n45z-30uc,104
@@ -275,9 +275,9 @@ workbench/web_interface/page_views/main_page.py,sha256=X4-KyGTKLAdxR-Zk2niuLJB2Y
 workbench/web_interface/page_views/models_page_view.py,sha256=M0bdC7bAzLyIaE2jviY12FF4abdMFZmg6sFuOY_LaGI,2650
 workbench/web_interface/page_views/page_view.py,sha256=Gh6YnpOGlUejx-bHZAf5pzqoQ1H1R0OSwOpGhOBO06w,455
 workbench/web_interface/page_views/pipelines_page_view.py,sha256=v2pxrIbsHBcYiblfius3JK766NZ7ciD2yPx0t3E5IJo,2656
-workbench-0.8.160.dist-info/licenses/LICENSE,sha256=
-workbench-0.8.160.dist-info/METADATA,sha256=
-workbench-0.8.160.dist-info/WHEEL,sha256=
-workbench-0.8.160.dist-info/entry_points.txt,sha256=
-workbench-0.8.160.dist-info/top_level.txt,sha256=
-workbench-0.8.160.dist-info/RECORD,,
+workbench-0.8.162.dist-info/licenses/LICENSE,sha256=z4QMMPlLJkZjU8VOKqJkZiQZCEZ--saIU2Z8-p3aVc0,1080
+workbench-0.8.162.dist-info/METADATA,sha256=2f0Ok7oYPAE3FA8mcG-9ySPkn1T_NygcKi3hNeXMEZY,9264
+workbench-0.8.162.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+workbench-0.8.162.dist-info/entry_points.txt,sha256=oZykkheWiiIBjRE8cS5SdcxwmZKSFaQEGwMBjNh-eNM,238
+workbench-0.8.162.dist-info/top_level.txt,sha256=Dhy72zTxaA_o_yRkPZx5zw-fwumnjGaeGf0hBN3jc_w,10
+workbench-0.8.162.dist-info/RECORD,,
{workbench-0.8.160.dist-info → workbench-0.8.162.dist-info}/WHEEL
File without changes

{workbench-0.8.160.dist-info → workbench-0.8.162.dist-info}/entry_points.txt
File without changes

{workbench-0.8.160.dist-info → workbench-0.8.162.dist-info}/licenses/LICENSE
File without changes

{workbench-0.8.160.dist-info → workbench-0.8.162.dist-info}/top_level.txt
File without changes