aimodelshare 0.1.4__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release. This version of aimodelshare might be problematic.
- aimodelshare/aimsonnx.py +20 -10
- aimodelshare/api.py +2 -1
- aimodelshare/main/eval_lambda.txt +14 -0
- aimodelshare/playground.py +8 -1
- {aimodelshare-0.1.4.dist-info → aimodelshare-0.1.5.dist-info}/METADATA +24 -26
- {aimodelshare-0.1.4.dist-info → aimodelshare-0.1.5.dist-info}/RECORD +11 -11
- {aimodelshare-0.1.4.dist-info → aimodelshare-0.1.5.dist-info}/WHEEL +1 -1
- tests/test_aimsonnx.py +135 -0
- tests/test_playground.py +394 -18
- {aimodelshare-0.1.4.dist-info → aimodelshare-0.1.5.dist-info}/LICENSE +0 -0
- {aimodelshare-0.1.4.dist-info → aimodelshare-0.1.5.dist-info}/top_level.txt +0 -0
aimodelshare/aimsonnx.py
CHANGED

@@ -3,25 +3,36 @@ import pandas as pd
 import numpy as np
 
 # ml frameworks
-
 try:
     import sklearn
     from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
 except:
-
+    print("Warning: Please install sklearn to enable sklearn features")
+
 try:
     import torch
 except:
-
+    print("Warning: Please install pytorch to enable pytorch features")
+
 try:
     import xgboost
 except:
-
+    print("Warning: Please install xgboost to enable xgboost features")
+
 try:
     import tensorflow as tf
     import keras
 except:
-
+    print("Warning: Please install tensorflow/keras to enable tensorflow/keras features")
+
+try:
+    import pyspark
+    from pyspark.sql import SparkSession
+    from pyspark.ml import PipelineModel, Model
+    from pyspark.ml.tuning import CrossValidatorModel, TrainValidationSplitModel
+    from onnxmltools import convert_sparkml
+except:
+    print("Warning: Please install pyspark to enable pyspark features")
 
 
 # onnx modules
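The hunk above extends the module's optional-dependency pattern to pyspark: every heavy ML framework is imported inside a try/except so that a missing package degrades to a printed warning instead of an ImportError at module load. A minimal sketch of the same pattern, using an illustrative `OPTIONAL_BACKENDS` registry rather than aimodelshare's actual module globals:

```python
# Sketch of the optional-import guard used above (names are illustrative,
# not aimodelshare's real internals). Each backend is imported if available
# and recorded as None otherwise, so callers can check before use.
import importlib

OPTIONAL_BACKENDS = {}
for name in ("sklearn", "torch", "xgboost", "tensorflow", "pyspark"):
    try:
        OPTIONAL_BACKENDS[name] = importlib.import_module(name)
    except ImportError:
        OPTIONAL_BACKENDS[name] = None
        print(f"Warning: Please install {name} to enable {name} features")
```

One consequence visible later in this file's diff: code has to assume a name like `pyspark` may be unbound or None, which is exactly what `_get_pyspark_modules` checks.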
@@ -1184,7 +1195,6 @@ def onnx_to_image(model):
     return pydot_graph
 
 
-
 def inspect_model(apiurl, version=None, naming_convention = None, submission_type="competition"):
     if all(["username" in os.environ,
             "password" in os.environ]):
@@ -1582,9 +1592,9 @@ def model_from_string(model_type):
 def _get_pyspark_modules():
     try:
         if pyspark is None:
-            raise("Error: Please install pyspark to enable pyspark features")
+            raise Exception("Error: Please install pyspark to enable pyspark features")
     except:
-        raise("Error: Please install pyspark to enable pyspark features")
+        raise Exception("Error: Please install pyspark to enable pyspark features")
 
     pyspark_modules = ['ml', 'ml.feature', 'ml.classification', 'ml.clustering', 'ml.regression']
 
@@ -1603,9 +1613,9 @@ def _get_pyspark_modules():
 def pyspark_model_from_string(model_type):
     try:
         if pyspark is None:
-            raise("Error: Please install pyspark to enable pyspark features")
+            raise Exception("Error: Please install pyspark to enable pyspark features")
     except:
-        raise("Error: Please install pyspark to enable pyspark features")
+        raise Exception("Error: Please install pyspark to enable pyspark features")
 
     models_modules_dict = _get_pyspark_modules()
     module = models_modules_dict[model_type]
aimodelshare/api.py
CHANGED

@@ -211,7 +211,8 @@ class create_prediction_api_class():
         ###
         api_key = str(shortuuid.uuid())
 
-        t = Template(pkg_resources.read_text(main, 'eval_lambda.txt').replace("$apikey",api_key))
+        t = Template(pkg_resources.read_text(main, 'eval_lambda.txt').replace("$apikey",api_key).replace("$task_type",self.task_type))
+
         data = t.substitute(bucket_name = self.bucket_name, unique_model_id = self.unique_model_id, task_type = self.task_type)
         with open(os.path.join(self.temp_dir, 'main.py'), 'w') as file:
             file.write(data)
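This change splices `$task_type` into the Lambda template with plain `str.replace` before the text ever reaches `string.Template`, the same way `$apikey` was already handled; the remaining placeholders are filled by `Template.substitute`. Since `str.replace` rewrites every `$task_type` occurrence up front, the `task_type=` keyword later passed to `substitute` has nothing left to fill, which is harmless because `substitute` ignores extra keywords. A minimal sketch of the two-stage templating, with invented template text:

```python
# Sketch of the two-stage substitution above (the template string is ours,
# not the real eval_lambda.txt).
from string import Template

lambda_src = 'API_KEY = "$apikey"\nTASK_TYPE = "$task_type"\nBUCKET = "$bucket_name"\n'

# Stage 1: plain str.replace splices values in before Template sees them.
pre_filled = lambda_src.replace("$apikey", "abc123").replace("$task_type", "classification")

# Stage 2: Template.substitute fills the placeholders that remain.
code = Template(pre_filled).substitute(bucket_name="my-bucket")
print(code)
```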
aimodelshare/main/eval_lambda.txt
CHANGED

@@ -33,6 +33,20 @@ def handler(event, context):
         if value == "None":
             body[key]=None
 
+
+    if body.get("return_task_type","ALL") == "True" or body.get("return_task_type", "ALL") == "TRUE":
+        task_type="$task_type"
+        task_type_dict = {"statusCode": 200,
+                          "headers": {
+                              "Access-Control-Allow-Origin" : "*",
+                              "Access-Control-Allow-Credentials": True,
+                              "Allow" : "GET, OPTIONS, POST",
+                              "Access-Control-Allow-Methods" : "GET, OPTIONS, POST",
+                              "Access-Control-Allow-Headers" : "*"},
+                          "body": json.dumps({"task_type":task_type})
+                         }
+        return task_type_dict
+
     if body.get("exampledata", "ALL") == "True" or body.get("exampledata", "ALL") == "TRUE":
 
         exampledata=get_exampledata(example_data_filename = "exampledata.json")
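The added branch returns early with an API Gateway Lambda proxy response: a `statusCode`, CORS `headers`, and a JSON-encoded string `body`. Because `$task_type` is replaced at deploy time (see the api.py change above), each deployed handler bakes in its playground's concrete task type and echoes it back whenever a request sets `return_task_type`. A minimal sketch of that response contract, with an assumed task type:

```python
import json

def task_type_response(task_type="classification"):
    # Shape required by API Gateway's Lambda proxy integration:
    # statusCode + headers + a *string* body (hence json.dumps).
    return {"statusCode": 200,
            "headers": {"Access-Control-Allow-Origin": "*",
                        "Access-Control-Allow-Methods": "GET, OPTIONS, POST"},
            "body": json.dumps({"task_type": task_type})}
```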
aimodelshare/playground.py
CHANGED

@@ -50,6 +50,13 @@ class ModelPlayground:
 
         self.model_type = input_type
 
+        if task_type == None:
+            post_dict = {"return_task_type": "TRUE"}
+            headers = { 'Content-Type':'application/json', 'authorizationToken': os.environ.get("AWS_TOKEN"),}
+            playground_url_eval=playground_url[:-1]+"eval"
+            response = requests.post(playground_url_eval,headers=headers,data=json.dumps(post_dict))
+            task_type = json.loads(response.text)['task_type']
+
         if task_type == "classification":
             self.categorical = True
         elif task_type == "regression":
@@ -786,7 +793,7 @@ class ModelPlayground:
                                 eval_metric_filepath=eval_metric_filepath,
                                 email_list=email_list,
                                 public=public,
-                                public_private_split=
+                                public_private_split=0, #set to 0 because its an experiment
                                 input_dict=exp_input_dict,
                                 print_output=False)
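The constructor change means that when `task_type` is omitted, `ModelPlayground` now makes a network round-trip to the playground's eval route to discover it (note that `playground_url[:-1] + "eval"` assumes the stored URL ends in `.../m`, so trimming one character yields `.../eval`). A standalone sketch of the same fallback, with the comparison written as the more idiomatic `is None`; the function name is ours, not the library's:

```python
# Standalone sketch of the task_type fallback performed in __init__.
import json
import os
import requests

def resolve_task_type(playground_url, task_type=None):
    if task_type is None:  # idiomatic form of `task_type == None`
        headers = {"Content-Type": "application/json",
                   "authorizationToken": os.environ.get("AWS_TOKEN")}
        eval_url = playground_url[:-1] + "eval"  # .../prod/m -> .../prod/eval
        response = requests.post(eval_url, headers=headers,
                                 data=json.dumps({"return_task_type": "TRUE"}))
        task_type = json.loads(response.text)["task_type"]
    return task_type
```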
{aimodelshare-0.1.4.dist-info → aimodelshare-0.1.5.dist-info}/METADATA
CHANGED

@@ -1,43 +1,41 @@
 Metadata-Version: 2.1
 Name: aimodelshare
-Version: 0.1.4
+Version: 0.1.5
 Summary: Deploy locally saved machine learning models to a live rest API and web-dashboard. Share it with the world via modelshare.org
 Home-page: https://www.modelshare.org
 Author: Michael Parrott
 Author-email: mikedparrott@modelshare.org
-License: UNKNOWN
-Platform: UNKNOWN
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: Other/Proprietary License
 Classifier: Operating System :: OS Independent
 Requires-Python: >=3.7
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: boto3
-Requires-Dist: botocore
-Requires-Dist: scikit-learn
-Requires-Dist: onnx
-Requires-Dist: onnxconverter-common
+Requires-Dist: boto3 ==1.26.69
+Requires-Dist: botocore ==1.29.82
+Requires-Dist: scikit-learn ==1.2.2
+Requires-Dist: onnx ==1.13.1
+Requires-Dist: onnxconverter-common >=1.7.0
 Requires-Dist: regex
-Requires-Dist: keras2onnx
-Requires-Dist: tensorflow
+Requires-Dist: keras2onnx >=1.7.0
+Requires-Dist: tensorflow >=2.12
 Requires-Dist: tf2onnx
-Requires-Dist: skl2onnx
-Requires-Dist: onnxruntime
-Requires-Dist: torch
-Requires-Dist: pydot
-Requires-Dist: importlib-resources
-Requires-Dist: onnxmltools
-Requires-Dist: Pympler
-Requires-Dist: docker
-Requires-Dist: wget
-Requires-Dist: PyJWT
-Requires-Dist: seaborn
-Requires-Dist: astunparse
-Requires-Dist: shortuuid
-Requires-Dist: psutil
-Requires-Dist: pathlib
-Requires-Dist: protobuf
+Requires-Dist: skl2onnx >=1.14.0
+Requires-Dist: onnxruntime >=1.7.0
+Requires-Dist: torch >=1.8.1
+Requires-Dist: pydot ==1.3.0
+Requires-Dist: importlib-resources ==5.10.0
+Requires-Dist: onnxmltools >=1.6.1
+Requires-Dist: Pympler ==0.9
+Requires-Dist: docker ==5.0.0
+Requires-Dist: wget ==3.2
+Requires-Dist: PyJWT >=2.4.0
+Requires-Dist: seaborn >=0.11.2
+Requires-Dist: astunparse ==1.6.3
+Requires-Dist: shortuuid >=1.0.8
+Requires-Dist: psutil >=5.9.1
+Requires-Dist: pathlib >=1.0.1
+Requires-Dist: protobuf >=3.20.1
 Requires-Dist: dill
 
 
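The dependency list moves from fully unpinned names to a mix of exact pins (`==`) and floors (`>=`): `boto3 ==1.26.69` resolves to exactly that release, while `torch >=1.8.1` accepts anything newer. A quick way to check what a given specifier admits, using two of the requirement strings above with the widely used `packaging` library (an assumption of this sketch, not a dependency of aimodelshare):

```python
# Sketch: evaluating pip-style version specifiers with `packaging`.
from packaging.requirements import Requirement
from packaging.version import Version

req = Requirement("boto3 ==1.26.69")
print(Version("1.26.69") in req.specifier)  # True
print(Version("1.26.70") in req.specifier)  # False

req = Requirement("torch >=1.8.1")
print(Version("2.0.0") in req.specifier)    # True
```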
{aimodelshare-0.1.4.dist-info → aimodelshare-0.1.5.dist-info}/RECORD
CHANGED

@@ -1,7 +1,7 @@
 aimodelshare/README.md,sha256=_OMdUIeIYZnpFlKdafM1KNWaANO2nWdx0QpLE_ZC-Qs,2014
 aimodelshare/__init__.py,sha256=CS0iFxgAic21gBcQE6NSZ-D_ElHw80_A3OWrEYo9Dks,539
-aimodelshare/aimsonnx.py,sha256=
-aimodelshare/api.py,sha256=
+aimodelshare/aimsonnx.py,sha256=CPZ_OZu909Tcq7x_Q5db_Oh-7vTbfV3bbRgSc7U6YgM,69338
+aimodelshare/api.py,sha256=XgFu30Cabh1G5MxU2ToALt4qp69XjwXJ4KZvIWtF3Zw,34935
 aimodelshare/aws.py,sha256=Ujhqgc1tpqdBVj5RCB_udTBt1lth4EZeLYgLU-dmu78,15188
 aimodelshare/aws_client.py,sha256=Ce19iwf69BwpuyyJlVN8z1da3c5jf93svsTgx1OWhaA,6784
 aimodelshare/base_image.py,sha256=itaQmX_q5GmgQrL3VNCBJpDGhl4PGA-nLTCbuyNDCCc,4825
@@ -15,7 +15,7 @@ aimodelshare/generatemodelapi.py,sha256=KNIWd8_g7auZi-UC-dCVmazGHJQvs9_68aGaUS47
 aimodelshare/leaderboard.py,sha256=xtKJcNCsZjy2IoK1fUTAFyM_I-eLCMS1WJRfwgsT5AA,5216
 aimodelshare/model.py,sha256=03TPnazX2s-6wjkzYl1sSpvzLOmpYW5k_2cIHZGmPgg,49899
 aimodelshare/modeluser.py,sha256=kW1zG4lFcwA0-ZLEyYaD8diJOF8PVhB-RGpPZQB07V4,4311
-aimodelshare/playground.py,sha256=
+aimodelshare/playground.py,sha256=0qRaJ8pn5e2uDrWqdsue199sTfYgYkfi19A29oGahO4,88713
 aimodelshare/postprocessormodules.py,sha256=L87fM2mywlInOrgaMETi-7zdHBGbIMRcrXKttQthyQ4,4992
 aimodelshare/preprocessormodules.py,sha256=ujkIdGWeMEyyc6eTaZi9kZRpfP-JEGzse89fL80ShHY,10912
 aimodelshare/readme.md,sha256=_OMdUIeIYZnpFlKdafM1KNWaANO2nWdx0QpLE_ZC-Qs,2014
@@ -107,7 +107,7 @@ aimodelshare/main/8.txt,sha256=MfcEQe9Gv6RSmWL3kd7oYkRkdDdkN4bPxEG43QVs7ms,4513
 aimodelshare/main/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 aimodelshare/main/authorization.txt,sha256=lBWFZ1pyNuYFSEEWQbfEAZFDspcVE1guzlfpES7HNxk,10942
 aimodelshare/main/eval_classification.txt,sha256=gCBU71rbXRlkBwefVN3WhwVJX9fXh6bwOCa7ofLMdnA,3081
-aimodelshare/main/eval_lambda.txt,sha256=
+aimodelshare/main/eval_lambda.txt,sha256=FZirZ_jrIdJQ90sChz30oBikUJG4eCVqlYg3z4MRS8o,60314
 aimodelshare/main/eval_regression.txt,sha256=iQeE9mbOkg-BDF9TnoQmglo86jBJitJQCvaf1eELzrs,3111
 aimodelshare/main/lambda_function.txt,sha256=-XkuD2YUOWNryNT7rBPjlts588UAeE949TUqeVGCRlQ,150
 aimodelshare/main/nst.txt,sha256=8kTsR18kDEcaQbv6091XDq1tRiqqFxdqfCteslR_udk,4941
@@ -141,10 +141,10 @@ aimodelshare/sam/codepipeline_trust_relationship.txt,sha256=yfPYvZlN3fnaIHs7I3EN
 aimodelshare/sam/spark-class.txt,sha256=chyJBxDzCzlUKXzVQYTzuJ2PXCTwg8_gd1yfnI-xbRw,217
 aimodelshare/sam/template.txt,sha256=JKSvEOZNaaLalHSx7r9psJg_6LLCb0XLAYi1-jYPu3M,1195
 tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-tests/test_aimsonnx.py,sha256=
-tests/test_playground.py,sha256=
-aimodelshare-0.1.4.dist-info/LICENSE,sha256=
-aimodelshare-0.1.4.dist-info/METADATA,sha256=
-aimodelshare-0.1.4.dist-info/WHEEL,sha256=
-aimodelshare-0.1.4.dist-info/top_level.txt,sha256=
-aimodelshare-0.1.4.dist-info/RECORD,,
+tests/test_aimsonnx.py,sha256=-GOF1_qXGQaMxHyqK0GPg7dD1meE-S7CZea4pLmBDTk,3906
+tests/test_playground.py,sha256=vdFWPRrZNQ2poiBOoN3l7HsXB5yc3p3rrrclNYJHnaw,24574
+aimodelshare-0.1.5.dist-info/LICENSE,sha256=JXBYLriXYgTloZs-9CJPZY76dqkuDT5df_HghMnljx8,1134
+aimodelshare-0.1.5.dist-info/METADATA,sha256=QDwwbkLuz2D27SYGrhX4lJkzUS9OtJY9ZIxXO84UpOE,3283
+aimodelshare-0.1.5.dist-info/WHEEL,sha256=5sUXSg9e4bi7lTLOHcm6QEYwO5TIF1TNbTSVFVjcJcc,92
+aimodelshare-0.1.5.dist-info/top_level.txt,sha256=2KJgeHQ0BmZuilB75J203i7W4vri6CON2kdbwk9BNpU,19
+aimodelshare-0.1.5.dist-info/RECORD,,
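Each RECORD entry has the form `path,sha256=<hash>,<size>`, where the hash is the unpadded urlsafe-base64 SHA-256 of the file, per the wheel RECORD format (PEP 376); installers use it to verify file integrity. A small sketch that reproduces an entry for any local file:

```python
# Compute a RECORD-style entry for a file (wheel RECORD / PEP 376 format).
import base64
import hashlib

def record_entry(path):
    data = open(path, "rb").read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
    return "%s,sha256=%s,%d" % (path, digest.rstrip(b"=").decode(), len(data))
```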
tests/test_aimsonnx.py
CHANGED

@@ -0,0 +1,135 @@
+from aimodelshare.aimsonnx import _get_layer_names
+from aimodelshare.aimsonnx import _get_layer_names_pytorch
+from aimodelshare.aimsonnx import _get_sklearn_modules
+from aimodelshare.aimsonnx import model_from_string
+from aimodelshare.aimsonnx import _get_pyspark_modules
+from aimodelshare.aimsonnx import pyspark_model_from_string
+from aimodelshare.aimsonnx import layer_mapping
+from aimodelshare.aimsonnx import _sklearn_to_onnx
+from aimodelshare.aimsonnx import _pyspark_to_onnx
+from aimodelshare.aimsonnx import _keras_to_onnx
+from aimodelshare.aimsonnx import _pytorch_to_onnx
+from aimodelshare.aimsonnx import _misc_to_onnx
+from sklearn.linear_model import LogisticRegression
+from sklearn.neural_network import MLPClassifier
+import onnx
+from xgboost import XGBClassifier
+from pyspark.ml.classification import RandomForestClassifier, MultilayerPerceptronClassifier
+from keras.models import Sequential
+from torch import nn
+import torch
+from tensorflow.keras.layers import Dense
+
+def test_sklearn_to_onnx():
+
+    from sklearn.datasets import load_iris
+    data = load_iris()
+    X = data.data
+    y = data.target
+
+    model = LogisticRegression(C=10, penalty='l1', solver='liblinear')
+    model.fit(X, y)
+    onnx_model = _sklearn_to_onnx(model)
+    assert isinstance(onnx_model, onnx.ModelProto)
+
+    # model = MLPClassifier()
+    # model.fit(X, y)
+    # onnx_model = _sklearn_to_onnx(model)
+    # assert isinstance(onnx_model, onnx.ModelProto)
+
+
+# def test_misc_to_onnx():
+#
+#     model = XGBClassifier()
+#     onnx_model = _misc_to_onnx(model)
+#     assert isinstance(onnx_model, onnx.ModelProto)
+
+
+# def test_pyspark_to_onnx():
+#
+#     model = RandomForestClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures", numTrees=10)
+#     onnx_model = _pyspark_to_onnx(model)
+#     assert isinstance(onnx_model, onnx.ModelProto)
+#
+#     model = MultilayerPerceptronClassifier()
+#     onnx_model = _pyspark_to_onnx(model)
+#     assert isinstance(onnx_model, onnx.ModelProto)
+
+def test_keras_to_onnx():
+
+    model = Sequential()
+    model.add(Dense(12, input_shape=(8,), activation='relu'))
+    model.add(Dense(8, activation='relu'))
+    model.add(Dense(1, activation='sigmoid'))
+    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
+
+    onnx_model = _keras_to_onnx(model)
+    assert isinstance(onnx_model, onnx.ModelProto)
+
+
+def test_pytorch_to_onnx():
+
+    model = nn.Sequential(nn.Linear(3, 3),
+                          nn.ReLU(),
+                          nn.Linear(3, 1),
+                          nn.Sigmoid())
+
+    onnx_model = _pytorch_to_onnx(model, torch.randn(1, 3))
+    assert isinstance(onnx_model, onnx.ModelProto)
+
+
+def test_get_layer_names():
+
+    layers = _get_layer_names()
+
+    assert isinstance(layers, tuple)
+
+
+def test_get_layer_names_pytorch():
+
+    layers = _get_layer_names_pytorch()
+
+    assert isinstance(layers, tuple)
+
+
+def test_get_sklearn_modules():
+
+    modules = _get_sklearn_modules()
+
+    assert isinstance(modules, dict)
+
+def test_model_from_string():
+
+    model_class = model_from_string("RandomForestClassifier")
+
+    assert model_class.__name__ == "RandomForestClassifier"
+
+
+def test_get_pyspark_modules():
+
+    modules = _get_pyspark_modules()
+
+    assert isinstance(modules, dict)
+
+
+def test_pyspark_model_from_string():
+
+    model_class = pyspark_model_from_string("RandomForestClassifier")
+
+    assert model_class.__name__ == "RandomForestClassifier"
+
+
+def test_layer_mapping():
+
+    layer_map = layer_mapping(direction="torch_to_keras")
+    assert isinstance(layer_map, dict)
+
+    layer_map = layer_mapping(direction="keras_to_torch")
+    assert isinstance(layer_map, dict)
+
+    layer_map = layer_mapping(direction="torch_to_keras", activation=True)
+    assert isinstance(layer_map, dict)
+
+    layer_map = layer_mapping(direction="keras_to_torch", activation=True)
+    assert isinstance(layer_map, dict)
+
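For context on what `test_pytorch_to_onnx` exercises: converting a torch module to an `onnx.ModelProto` typically goes through `torch.onnx.export`. A minimal sketch under the assumption that `_pytorch_to_onnx` wraps something like the following (the function name here is ours, not the library's internals):

```python
# Sketch: export a torch module and parse it back into the ModelProto
# that the tests assert against.
import io
import onnx
import torch

def pytorch_to_onnx_sketch(model, example_input):
    buffer = io.BytesIO()
    torch.onnx.export(model, example_input, buffer)  # trace with example input
    buffer.seek(0)
    return onnx.load_model(buffer)
```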
tests/test_playground.py
CHANGED

@@ -17,25 +17,18 @@ import os
 
 
 
+def test_configure_credentials():
 
-#
-
-
-
-
-# def test_quickstart_sklearn():
-
-#     X_train, X_test, y_train, y_test, example_data, y_test_labels = ai.import_quickstart_data("titanic")
-
-#     assert isinstance(X_train, pd.DataFrame)
-#     assert isinstance(X_test, pd.DataFrame)
-#     assert isinstance(y_train, pd.Series)
-#     assert isinstance(y_test, pd.Series)
-#     assert isinstance(example_data, pd.DataFrame)
-#     assert isinstance(y_test_labels, list)
+    # when testing locally, we can set credentials from file
+    try:
+        set_credentials(credential_file="../../../credentials.txt", type="deploy_model")
+    except Exception as e:
+        print(e)
 
-
+    try:
+        set_credentials(credential_file="../../credentials.txt", type="deploy_model")
+    except Exception as e:
+        print(e)
 
     # mock user input
     inputs = [os.environ.get('USERNAME'),
@@ -44,6 +37,7 @@ def test_configure_credentials():
               os.environ.get('AWS_SECRET_ACCESS_KEY'),
              os.environ.get('AWS_REGION')]
 
+
    with patch("getpass.getpass", side_effect=inputs):
        from aimodelshare.aws import configure_credentials
        configure_credentials()
@@ -54,6 +48,17 @@
 
 def test_playground_sklearn():
 
+    # when testing locally, we can set credentials from file
+    try:
+        set_credentials(credential_file="../../../credentials.txt", type="deploy_model")
+    except Exception as e:
+        print(e)
+
+    try:
+        set_credentials(credential_file="../../credentials.txt", type="deploy_model")
+    except Exception as e:
+        print(e)
+
     # mock user input
     inputs = [os.environ.get('USERNAME'),
               os.environ.get('PASSWORD'),
@@ -192,6 +197,17 @@ def test_playground_sklearn():
 
 def test_playground_keras():
 
+    # when testing locally, we can set credentials from file
+    try:
+        set_credentials(credential_file="../../../credentials.txt", type="deploy_model")
+    except Exception as e:
+        print(e)
+
+    try:
+        set_credentials(credential_file="../../credentials.txt", type="deploy_model")
+    except Exception as e:
+        print(e)
+
     # mock user input
     inputs = [os.environ.get('USERNAME'),
               os.environ.get('PASSWORD'),
@@ -288,7 +304,6 @@
     # example url from deployed playground: apiurl= "https://123456.execute-api.us-east-1.amazonaws.com/prod/m
     apiurl=myplayground.playground_url
 
-
     # Submit Model 2
     # Generate predicted y values (Model 2)
     prediction_column_index=keras_model_2.predict(X_test).argmax(axis=1)
@@ -343,3 +358,364 @@
     shutil.rmtree("flower_competition_data", onerror=redo_with_write)
     shutil.rmtree("quickstart_materials", onerror=redo_with_write)
     shutil.rmtree("quickstart_flowers_competition", onerror=redo_with_write)
+
+
+def test_playground_pytorch():
+
+    # when testing locally, we can set credentials from file
+    try:
+        set_credentials(credential_file="../../../credentials.txt", type="deploy_model")
+    except Exception as e:
+        print(e)
+
+    try:
+        set_credentials(credential_file="../../credentials.txt", type="deploy_model")
+    except Exception as e:
+        print(e)
+
+    # mock user input
+    inputs = [os.environ.get('USERNAME'),
+              os.environ.get('PASSWORD'),
+              os.environ.get('AWS_ACCESS_KEY_ID'),
+              os.environ.get('AWS_SECRET_ACCESS_KEY'),
+              os.environ.get('AWS_REGION')]
+
+    with patch("getpass.getpass", side_effect=inputs):
+        from aimodelshare.aws import configure_credentials
+        configure_credentials()
+
+    # set credentials
+    set_credentials(credential_file="credentials.txt", type="deploy_model")
+    # os.environ["AWS_TOKEN"]=get_aws_token()
+
+    # clean up credentials file
+    os.remove("credentials.txt")
+
+    # Download flower image file (jpg) dataset
+    import aimodelshare as ai
+    ai.download_data("public.ecr.aws/y2e2a1d6/flower-competition-data-repository:latest")
+
+    # Extract filepaths to use to import and preprocess image files...
+    base_path = 'flower-competition-data/train_images'
+    categories = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']
+
+    # Load file paths to fnames list object...
+    fnames = []
+
+    for category in categories:
+        flower_folder = os.path.join(base_path, category)
+        file_names = os.listdir(flower_folder)
+        full_path = [os.path.join(flower_folder, file_name) for file_name in file_names]
+        fnames.append(full_path)
+
+    # Here is a pre-designed preprocessor, but you could also build your own to prepare the data differently
+
+    def preprocessor(data, shape=(128, 128)):
+        """
+        Reads in images, resizes them to a fixed shape, and min/max transforms
+        them before converting feature values to the float32 numeric values
+        required by onnx files.
+
+        params:
+            data
+                list of unprocessed images
+
+        returns:
+            X
+                numpy array of preprocessed image data
+        """
+
+        import cv2
+        import numpy as np
+
+        # Resize a color image and min/max transform the image
+        img = cv2.imread(data)  # Read in image from filepath.
+        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # cv2 reads in images in blue, green, red order; we reverse the order for ML.
+        img = cv2.resize(img, shape)  # Change height and width of image.
+        img = img / 255.0  # Min-max transform.
+
+        X = np.array(img)
+        X = np.expand_dims(X, axis=0)  # Expand dims to add "1" to object shape [1, h, w, channels].
+        X = np.array(X, dtype=np.float32)  # Final shape for onnx runtime.
+
+        # transpose image to pytorch format
+        X = np.transpose(X, (0, 3, 1, 2))
+
+        return X
+
+    # Import image, load to array of shape height, width, channels, then min/max transform...
+
+    # Read in all images from filenames...
+    preprocessed_image_data = [preprocessor(x) for x in fnames[0] + fnames[1] + fnames[2] + fnames[3] + fnames[4]]
+
+    # Models require object to be an array rather than a list. (vstack converts above list to array object.)
+    import numpy as np
+    X = np.vstack(preprocessed_image_data)  # Assigning to X to highlight that this represents feature input data for our model.
+
+    # Create y training label data made up of correctly ordered labels from file folders...
+    from itertools import repeat
+
+    daisy = list(repeat("daisy", 507))  # i.e.: 507 filenames in daisy folder
+    dandelion = list(repeat("dandelion", 718))
+    roses = list(repeat("roses", 513))
+    sunflowers = list(repeat("sunflowers", 559))
+    tulips = list(repeat("tulips", 639))
+
+    # Combine into single list of y labels...
+    y_labels = daisy + dandelion + roses + sunflowers + tulips
+
+    # Check length, same as X above...
+    len(y_labels)
+
+    # get numerical representation of y labels
+    import pandas as pd
+    y_labels_num = pd.DataFrame(y_labels)[0].map(
+        {'daisy': 4, 'dandelion': 1,  # `data_paths` has 'daisy', 'dandelion', 'sunflowers', 'roses', 'tulips'...
+         'sunflowers': 2, 'roses': 3, 'tulips': 0})  # ...but `image_paths` has 'tulips' first, and 'daisy' last.
+
+    y_labels_num = list(y_labels_num)
+
+    # train_test_split resized images...
+    from sklearn.model_selection import train_test_split
+
+    X_train, X_test, y_train, y_test = train_test_split(X, y_labels_num,
+                                                        stratify=y_labels_num,
+                                                        test_size=0.20,
+                                                        random_state=1987)
+
+    import torch
+
+    # Get cpu or gpu device for training.
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    print(f"Using {device} device")
+
+    from torch.utils.data import DataLoader, TensorDataset
+
+    # prepare datasets for pytorch dataloader
+    tensor_X_train = torch.Tensor(X_train)
+    tensor_y_train = torch.tensor(y_train, dtype=torch.long)
+    train_ds = TensorDataset(tensor_X_train, tensor_y_train)
+
+    tensor_X_test = torch.Tensor(X_test)
+    tensor_y_test = torch.tensor(y_test, dtype=torch.long)
+    test_ds = TensorDataset(tensor_X_test, tensor_y_test)
+
+    # set up dataloaders
+    batch_size = 50
+    train_dataloader = DataLoader(train_ds, batch_size=batch_size, shuffle=False)
+    test_dataloader = DataLoader(test_ds, batch_size=batch_size, shuffle=False)
+
+    from torch import nn
+
+    # Define pytorch model
+    class NeuralNetwork(nn.Module):
+        def __init__(self):
+            super(NeuralNetwork, self).__init__()
+            self.flatten = nn.Flatten()
+            self.linear_relu_stack = nn.Sequential(
+                nn.Linear(128 * 128 * 3, 512),
+                nn.ReLU(),
+                nn.Linear(512, 512),
+                nn.ReLU(),
+                nn.Linear(512, 5)
+            )
+
+        def forward(self, x):
+            x = self.flatten(x)
+            logits = self.linear_relu_stack(x)
+            return logits
+
+    model = NeuralNetwork().to(device)
+    print(model)
+
+    # set up loss function and optimizer
+    loss_fn = nn.CrossEntropyLoss()
+    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
+
+    # define training function
+    def train(dataloader, model, loss_fn, optimizer):
+        size = len(dataloader.dataset)
+        model.train()
+        for batch, (X, y) in enumerate(dataloader):
+            X, y = X.to(device), y.to(device)
+
+            # Compute prediction error
+            pred = model(X)
+            loss = loss_fn(pred, y)
+
+            # Backpropagation
+            optimizer.zero_grad()
+            loss.backward()
+            optimizer.step()
+
+            if batch % 100 == 0:
+                loss, current = loss.item(), batch * len(X)
+                print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
+
+    # define testing function
+    def test(dataloader, model, loss_fn):
+        size = len(dataloader.dataset)
+        num_batches = len(dataloader)
+        model.eval()
+        test_loss, correct = 0, 0
+        with torch.no_grad():
+            for X, y in dataloader:
+                X, y = X.to(device), y.to(device)
+                pred = model(X)
+                test_loss += loss_fn(pred, y).item()
+                correct += (pred.argmax(1) == y).type(torch.float).sum().item()
+        test_loss /= num_batches
+        correct /= size
+        print(f"Test Error: \n Accuracy: {(100 * correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
+
+    epochs = 2
+    for t in range(epochs):
+        print(f"Epoch {t + 1}\n-------------------------------")
+        train(train_dataloader, model, loss_fn, optimizer)
+        test(test_dataloader, model, loss_fn)
+    print("Done!")
+
+    # -- Generate predicted y values (Model 1)
+    # Note: returns the predicted column index location for classification models
+    if torch.cuda.is_available():
+        prediction_column_index = model(tensor_X_test.cuda()).argmax(axis=1)
+    else:
+        prediction_column_index = model(tensor_X_test).argmax(axis=1)
+
+    # extract correct prediction labels
+    prediction_labels = [['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips'][i] for i in prediction_column_index]
+
+    # Create labels for y_test
+    y_test_labels = [['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips'][i] for i in y_test]
+
+    # Create labels for y_train
+    y_train_labels = [['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips'][i] for i in y_train]
+
+    # Instantiate Model Playground object
+    from aimodelshare.playground import ModelPlayground
+    myplayground = ModelPlayground(input_type="image", task_type="classification", private=False)
+
+    # Create Model Playground Page on modelshare.ai website
+    myplayground.create(eval_data=y_test_labels)
+
+    if torch.cuda.is_available():
+        example_input = torch.randn(1, 3, 128, 128, requires_grad=True).cuda()
+    else:
+        example_input = torch.randn(1, 3, 128, 128, requires_grad=True)
+
+    # Submit Model to Experiment Leaderboard
+    myplayground.submit_model(model=model,
+                              preprocessor=preprocessor,
+                              prediction_submission=prediction_labels,
+                              input_dict={"description": "", "tags": ""},
+                              submission_type="all",
+                              model_input=example_input)
+
+    # Create example data folder to provide on model playground page
+    # for users to test prediction REST API
+    import shutil
+    os.mkdir('example_data')
+    example_images = ["flower-competition-data/train_images/daisy/100080576_f52e8ee070_n.jpg",
+                      "flower-competition-data/train_images/dandelion/10200780773_c6051a7d71_n.jpg",
+                      "flower-competition-data/train_images/roses/10503217854_e66a804309.jpg",
+                      "flower-competition-data/train_images/sunflowers/1022552002_2b93faf9e7_n.jpg",
+                      "flower-competition-data/train_images/tulips/100930342_92e8746431_n.jpg"]
+
+    for image in example_images:
+        shutil.copy(image, 'example_data')
+
+    # Deploy model by version number
+    myplayground.deploy_model(model_version=1, example_data="example_data", y_train=y_train)
+
+    # example url from deployed playground: apiurl= "https://123456.execute-api.us-east-1.amazonaws.com/prod/m
+    apiurl = myplayground.playground_url
+
+    # Submit Model 2
+    # Define model
+    class NeuralNetwork(nn.Module):
+        def __init__(self):
+            super(NeuralNetwork, self).__init__()
+            self.flatten = nn.Flatten()
+            self.linear_relu_stack = nn.Sequential(
+                nn.Linear(128 * 128 * 3, 512),
+                nn.ReLU(),
+                nn.Linear(512, 512),
+                nn.ReLU(),
+                nn.Linear(512, 256),
+                nn.ReLU(),
+                nn.Linear(256, 5)
+            )
+
+        def forward(self, x):
+            x = self.flatten(x)
+            logits = self.linear_relu_stack(x)
+            return logits
+
+    model2 = NeuralNetwork().to(device)
+    print(model2)
+
+    # set up loss function and optimizer
+    loss_fn = nn.CrossEntropyLoss()
+    optimizer = torch.optim.SGD(model2.parameters(), lr=1e-3)
+
+    # train model
+    epochs = 2
+    for t in range(epochs):
+        print(f"Epoch {t + 1}\n-------------------------------")
+        train(train_dataloader, model2, loss_fn, optimizer)
+        test(test_dataloader, model2, loss_fn)
+    print("Done!")
+
+    # Submit Model 2 to Experiment Leaderboard
+    myplayground.submit_model(model=model2,
+                              preprocessor=preprocessor,
+                              prediction_submission=prediction_labels,
+                              input_dict={"description": "", "tags": ""},
+                              submission_type="all",
+                              model_input=example_input)
+
+    # submit model through competition
+    mycompetition = ai.playground.Competition(myplayground.playground_url)
+    mycompetition.submit_model(model=model2,
+                               preprocessor=preprocessor,
+                               prediction_submission=prediction_labels,
+                               input_dict={"description": "", "tags": ""},
+                               model_input=example_input)
+
+    # submit model through experiment
+    myexperiment = ai.playground.Experiment(myplayground.playground_url)
+    myexperiment.submit_model(model=model2,
+                              preprocessor=preprocessor,
+                              prediction_submission=prediction_labels,
+                              input_dict={"description": "", "tags": ""},
+                              model_input=example_input)
+
+    # Check experiment leaderboard
+    data = myplayground.get_leaderboard()
+    myplayground.stylize_leaderboard(data)
+    assert isinstance(data, pd.DataFrame)
+
+    # Compare two or more models
+    data = myplayground.compare_models([1, 2], verbose=1)
+    myplayground.stylize_compare(data)
+    assert isinstance(data, (pd.DataFrame, dict))
+
+    # Check structure of evaluation data
+    data = myplayground.inspect_eval_data()
+    assert isinstance(data, dict)
+
+    # Update runtime model
+    myplayground.update_runtime_model(model_version=2)
+
+    # delete
+    myplayground.delete_deployment(confirmation=False)
+
+    # local cleanup
+    shutil.rmtree("flower-competition-data", onerror=redo_with_write)
+    shutil.rmtree("example_data", onerror=redo_with_write)
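The cleanup calls rely on a `redo_with_write` error handler that this diff does not show (it presumably lives elsewhere in the test module). A typical implementation of such a `shutil.rmtree` onerror hook, included here as an assumption rather than the module's actual code:

```python
import os
import stat

def redo_with_write(func, path, exc_info):
    # onerror hook for shutil.rmtree: clear the read-only bit and retry the
    # failed operation, which handles files Windows marks read-only.
    os.chmod(path, stat.S_IWRITE)
    func(path)
```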
{aimodelshare-0.1.4.dist-info → aimodelshare-0.1.5.dist-info}/LICENSE
File without changes

{aimodelshare-0.1.4.dist-info → aimodelshare-0.1.5.dist-info}/top_level.txt
File without changes