aimodelshare 0.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aimodelshare/README.md +26 -0
- aimodelshare/__init__.py +100 -0
- aimodelshare/aimsonnx.py +2381 -0
- aimodelshare/api.py +836 -0
- aimodelshare/auth.py +163 -0
- aimodelshare/aws.py +511 -0
- aimodelshare/aws_client.py +173 -0
- aimodelshare/base_image.py +154 -0
- aimodelshare/bucketpolicy.py +106 -0
- aimodelshare/color_mappings/color_mapping_keras.csv +121 -0
- aimodelshare/color_mappings/color_mapping_pytorch.csv +117 -0
- aimodelshare/containerisation.py +244 -0
- aimodelshare/containerization.py +712 -0
- aimodelshare/containerization_templates/Dockerfile.txt +8 -0
- aimodelshare/containerization_templates/Dockerfile_PySpark.txt +23 -0
- aimodelshare/containerization_templates/buildspec.txt +14 -0
- aimodelshare/containerization_templates/lambda_function.txt +40 -0
- aimodelshare/custom_approach/__init__.py +1 -0
- aimodelshare/custom_approach/lambda_function.py +17 -0
- aimodelshare/custom_eval_metrics.py +103 -0
- aimodelshare/data_sharing/__init__.py +0 -0
- aimodelshare/data_sharing/data_sharing_templates/Dockerfile.txt +3 -0
- aimodelshare/data_sharing/data_sharing_templates/__init__.py +1 -0
- aimodelshare/data_sharing/data_sharing_templates/buildspec.txt +15 -0
- aimodelshare/data_sharing/data_sharing_templates/codebuild_policies.txt +129 -0
- aimodelshare/data_sharing/data_sharing_templates/codebuild_trust_relationship.txt +12 -0
- aimodelshare/data_sharing/download_data.py +620 -0
- aimodelshare/data_sharing/share_data.py +373 -0
- aimodelshare/data_sharing/utils.py +8 -0
- aimodelshare/deploy_custom_lambda.py +246 -0
- aimodelshare/documentation/Makefile +20 -0
- aimodelshare/documentation/karma_sphinx_theme/__init__.py +28 -0
- aimodelshare/documentation/karma_sphinx_theme/_version.py +2 -0
- aimodelshare/documentation/karma_sphinx_theme/breadcrumbs.html +70 -0
- aimodelshare/documentation/karma_sphinx_theme/layout.html +172 -0
- aimodelshare/documentation/karma_sphinx_theme/search.html +50 -0
- aimodelshare/documentation/karma_sphinx_theme/searchbox.html +14 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css +2 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css +2751 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css +2 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.eot +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.svg +32 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.ttf +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff2 +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/js/theme.js +68 -0
- aimodelshare/documentation/karma_sphinx_theme/theme.conf +9 -0
- aimodelshare/documentation/make.bat +35 -0
- aimodelshare/documentation/requirements.txt +2 -0
- aimodelshare/documentation/source/about.rst +18 -0
- aimodelshare/documentation/source/advanced_features.rst +137 -0
- aimodelshare/documentation/source/competition.rst +218 -0
- aimodelshare/documentation/source/conf.py +58 -0
- aimodelshare/documentation/source/create_credentials.rst +86 -0
- aimodelshare/documentation/source/example_notebooks.rst +132 -0
- aimodelshare/documentation/source/functions.rst +151 -0
- aimodelshare/documentation/source/gettingstarted.rst +390 -0
- aimodelshare/documentation/source/images/creds1.png +0 -0
- aimodelshare/documentation/source/images/creds2.png +0 -0
- aimodelshare/documentation/source/images/creds3.png +0 -0
- aimodelshare/documentation/source/images/creds4.png +0 -0
- aimodelshare/documentation/source/images/creds5.png +0 -0
- aimodelshare/documentation/source/images/creds_file_example.png +0 -0
- aimodelshare/documentation/source/images/predict_tab.png +0 -0
- aimodelshare/documentation/source/index.rst +110 -0
- aimodelshare/documentation/source/modelplayground.rst +132 -0
- aimodelshare/exceptions.py +11 -0
- aimodelshare/generatemodelapi.py +1270 -0
- aimodelshare/iam/codebuild_policy.txt +129 -0
- aimodelshare/iam/codebuild_trust_relationship.txt +12 -0
- aimodelshare/iam/lambda_policy.txt +15 -0
- aimodelshare/iam/lambda_trust_relationship.txt +12 -0
- aimodelshare/json_templates/__init__.py +1 -0
- aimodelshare/json_templates/api_json.txt +155 -0
- aimodelshare/json_templates/auth/policy.txt +1 -0
- aimodelshare/json_templates/auth/role.txt +1 -0
- aimodelshare/json_templates/eval/policy.txt +1 -0
- aimodelshare/json_templates/eval/role.txt +1 -0
- aimodelshare/json_templates/function/policy.txt +1 -0
- aimodelshare/json_templates/function/role.txt +1 -0
- aimodelshare/json_templates/integration_response.txt +5 -0
- aimodelshare/json_templates/lambda_policy_1.txt +15 -0
- aimodelshare/json_templates/lambda_policy_2.txt +8 -0
- aimodelshare/json_templates/lambda_role_1.txt +12 -0
- aimodelshare/json_templates/lambda_role_2.txt +16 -0
- aimodelshare/leaderboard.py +174 -0
- aimodelshare/main/1.txt +132 -0
- aimodelshare/main/1B.txt +112 -0
- aimodelshare/main/2.txt +153 -0
- aimodelshare/main/3.txt +134 -0
- aimodelshare/main/4.txt +128 -0
- aimodelshare/main/5.txt +109 -0
- aimodelshare/main/6.txt +105 -0
- aimodelshare/main/7.txt +144 -0
- aimodelshare/main/8.txt +142 -0
- aimodelshare/main/__init__.py +1 -0
- aimodelshare/main/authorization.txt +275 -0
- aimodelshare/main/eval_classification.txt +79 -0
- aimodelshare/main/eval_lambda.txt +1709 -0
- aimodelshare/main/eval_regression.txt +80 -0
- aimodelshare/main/lambda_function.txt +8 -0
- aimodelshare/main/nst.txt +149 -0
- aimodelshare/model.py +1543 -0
- aimodelshare/modeluser.py +215 -0
- aimodelshare/moral_compass/README.md +408 -0
- aimodelshare/moral_compass/__init__.py +65 -0
- aimodelshare/moral_compass/_version.py +3 -0
- aimodelshare/moral_compass/api_client.py +601 -0
- aimodelshare/moral_compass/apps/__init__.py +69 -0
- aimodelshare/moral_compass/apps/ai_consequences.py +540 -0
- aimodelshare/moral_compass/apps/bias_detective.py +714 -0
- aimodelshare/moral_compass/apps/ethical_revelation.py +898 -0
- aimodelshare/moral_compass/apps/fairness_fixer.py +889 -0
- aimodelshare/moral_compass/apps/judge.py +888 -0
- aimodelshare/moral_compass/apps/justice_equity_upgrade.py +853 -0
- aimodelshare/moral_compass/apps/mc_integration_helpers.py +820 -0
- aimodelshare/moral_compass/apps/model_building_game.py +1104 -0
- aimodelshare/moral_compass/apps/model_building_game_beginner.py +687 -0
- aimodelshare/moral_compass/apps/moral_compass_challenge.py +858 -0
- aimodelshare/moral_compass/apps/session_auth.py +254 -0
- aimodelshare/moral_compass/apps/shared_activity_styles.css +349 -0
- aimodelshare/moral_compass/apps/tutorial.py +481 -0
- aimodelshare/moral_compass/apps/what_is_ai.py +853 -0
- aimodelshare/moral_compass/challenge.py +365 -0
- aimodelshare/moral_compass/config.py +187 -0
- aimodelshare/placeholders/model.onnx +0 -0
- aimodelshare/placeholders/preprocessor.zip +0 -0
- aimodelshare/playground.py +1968 -0
- aimodelshare/postprocessormodules.py +157 -0
- aimodelshare/preprocessormodules.py +373 -0
- aimodelshare/pyspark/1.txt +195 -0
- aimodelshare/pyspark/1B.txt +181 -0
- aimodelshare/pyspark/2.txt +220 -0
- aimodelshare/pyspark/3.txt +204 -0
- aimodelshare/pyspark/4.txt +187 -0
- aimodelshare/pyspark/5.txt +178 -0
- aimodelshare/pyspark/6.txt +174 -0
- aimodelshare/pyspark/7.txt +211 -0
- aimodelshare/pyspark/8.txt +206 -0
- aimodelshare/pyspark/__init__.py +1 -0
- aimodelshare/pyspark/authorization.txt +258 -0
- aimodelshare/pyspark/eval_classification.txt +79 -0
- aimodelshare/pyspark/eval_lambda.txt +1441 -0
- aimodelshare/pyspark/eval_regression.txt +80 -0
- aimodelshare/pyspark/lambda_function.txt +8 -0
- aimodelshare/pyspark/nst.txt +213 -0
- aimodelshare/python/my_preprocessor.py +58 -0
- aimodelshare/readme.md +26 -0
- aimodelshare/reproducibility.py +181 -0
- aimodelshare/sam/Dockerfile.txt +8 -0
- aimodelshare/sam/Dockerfile_PySpark.txt +24 -0
- aimodelshare/sam/__init__.py +1 -0
- aimodelshare/sam/buildspec.txt +11 -0
- aimodelshare/sam/codebuild_policies.txt +129 -0
- aimodelshare/sam/codebuild_trust_relationship.txt +12 -0
- aimodelshare/sam/codepipeline_policies.txt +173 -0
- aimodelshare/sam/codepipeline_trust_relationship.txt +12 -0
- aimodelshare/sam/spark-class.txt +2 -0
- aimodelshare/sam/template.txt +54 -0
- aimodelshare/tools.py +103 -0
- aimodelshare/utils/__init__.py +78 -0
- aimodelshare/utils/optional_deps.py +38 -0
- aimodelshare/utils.py +57 -0
- aimodelshare-0.3.7.dist-info/METADATA +298 -0
- aimodelshare-0.3.7.dist-info/RECORD +171 -0
- aimodelshare-0.3.7.dist-info/WHEEL +5 -0
- aimodelshare-0.3.7.dist-info/licenses/LICENSE +5 -0
- aimodelshare-0.3.7.dist-info/top_level.txt +1 -0
aimodelshare/main/1.txt
ADDED
@@ -0,0 +1,132 @@
+#Text Classification Prediction Runtime Code
+
+import boto3
+import pandas as pd
+import os
+from io import BytesIO
+import pickle
+import numpy as np
+import json
+import onnxruntime as rt
+import warnings
+import six
+
+
+def get_model_onnx(runtimemodel_s3_filename="runtime_model.onnx"):
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" +
+                    "/runtime_model.onnx")
+    model = rt.InferenceSession(obj.get()['Body'].read())
+    return model
+
+
+
+def get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip"):
+    import pickle
+    from zipfile import ZipFile
+    from io import BytesIO
+    import os
+    s3 = boto3.resource("s3")
+    bucket = s3.Bucket("$bucket_name")
+
+    zip_obj = s3.Object(bucket_name="$bucket_name", key="$unique_model_id/runtime_preprocessor.zip")
+    buffer = BytesIO(zip_obj.get()["Body"].read())
+    z = ZipFile(buffer)
+    # Extract all the contents of the zip file into /tmp/
+    z.extractall("/tmp/")
+
+    folderpath = os.path.dirname(os.path.abspath("/tmp/preprocessor.py"))
+    file_name = os.path.basename("/tmp/preprocessor.py")
+
+    # Then import all pkl files you want from bucket (need to generate this list from
+    # function globals)
+    import os
+    pickle_file_list = []
+    for file in os.listdir(folderpath):
+        if file.endswith(".pkl"):
+            pickle_file_list.append(os.path.join(folderpath, file))
+
+    for i in pickle_file_list:
+        objectname = str(os.path.basename(i)).replace(".pkl", "")
+        objects = {objectname: ""}
+        globals()[objectname] = pickle.load(open(str(i), "rb"))
+    # First import preprocessor function to session from preprocessor.py
+    exec(open(os.path.join(folderpath, 'preprocessor.py')).read(), globals())
+    return preprocessor
+
+def get_runtimedata(runtimedata_s3_filename="runtime_data.json"):
+
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" + "/" + runtimedata_s3_filename)
+    runtime_data = json.load(obj.get()['Body'])
+
+    return runtime_data
+
+
+runtime_data = get_runtimedata(runtimedata_s3_filename="runtime_data.json")
+
+preprocessor_type = runtime_data["runtime_preprocessor"]
+
+runtime_model = runtime_data["runtime_model"]["name"]
+
+# Load model
+model = get_model_onnx(runtimemodel_s3_filename='runtime_model.onnx')
+
+# Load preprocessor
+preprocessor = get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip")
+
+
+def predict(event, model, preprocessor):
+    body = event["body"]
+    if isinstance(event["body"], six.string_types):
+        body = json.loads(event["body"])
+        print(body["data"])
+        bodynew = pd.Series(body["data"])
+    else:
+        print(body["data"])
+        bodynew = pd.Series(body["data"])
+    print(bodynew)
+
+    sess = model
+    def predict_classes(x):  # adjusted from keras github code
+        proba = x
+        if proba.shape[-1] > 1:
+            return proba.argmax(axis=-1)
+        else:
+            return (proba > 0.5).astype("int32")
+    input_name = sess.get_inputs()[0].name
+
+    input_data = preprocessor(bodynew).astype(np.float32)  # needs to be float32
+
+    res = sess.run(None, {input_name: input_data})
+    prob = res[0]
+    print(prob)
+    try:
+        prediction_index = predict_classes(prob)
+        def index_to_label(labels, index_n):
+            return labels[index_n]
+
+        # load class labels
+        try:
+            s3 = boto3.resource('s3')
+            obj = s3.Object("$bucket_name", "$unique_model_id" + "/labels.json")
+            labels = json.loads(obj.get()['Body'].read())
+        except:
+            labels = $labels
+
+        result = list(map(lambda x: labels[x], prediction_index))
+    except:
+        result = prob.tolist()
+    return result
+
+def handler(event, context):
+    result = predict(event, model, preprocessor)
+    return {"statusCode": 200,
+            "headers": {
+                "Access-Control-Allow-Origin": "*",
+                "Access-Control-Allow-Credentials": True,
+                "Allow": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Headers": "*"
+            },
+            "body": json.dumps(result)}
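
Note: the template above becomes a Lambda handler once the deployment flow substitutes the $bucket_name, $unique_model_id, and $labels placeholders. A minimal local smoke test, assuming the substituted source is saved and importable as lambda_function (the module name is illustrative, not fixed by the package):

    import json
    import lambda_function  # the substituted template, saved locally

    # API Gateway delivers the request body as a JSON string
    event = {"body": json.dumps({"data": ["an example sentence to classify"]})}
    response = lambda_function.handler(event, None)  # context is unused here
    print(response["statusCode"], response["body"])
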
aimodelshare/main/1B.txt
ADDED
@@ -0,0 +1,112 @@
+#Text Regression Prediction Runtime Code
+
+import boto3
+import pandas as pd
+import os
+from io import BytesIO
+import pickle
+import numpy as np
+import json
+import onnxruntime as rt
+import warnings
+import six
+
+def get_model_onnx(runtimemodel_s3_filename="runtime_model.onnx"):
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" +
+                    "/runtime_model.onnx")
+    model = rt.InferenceSession(obj.get()['Body'].read())
+    return model
+
+def get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip"):
+    import pickle
+    from zipfile import ZipFile
+    from io import BytesIO
+    import os
+    s3 = boto3.resource("s3")
+    bucket = s3.Bucket("$bucket_name")
+
+    zip_obj = s3.Object(bucket_name="$bucket_name", key="$unique_model_id/runtime_preprocessor.zip")
+    buffer = BytesIO(zip_obj.get()["Body"].read())
+    z = ZipFile(buffer)
+    # Extract all the contents of the zip file into /tmp/
+    z.extractall("/tmp/")
+
+    folderpath = os.path.dirname(os.path.abspath("/tmp/preprocessor.py"))
+    file_name = os.path.basename("/tmp/preprocessor.py")
+
+    # Then import all pkl files you want from bucket (need to generate this list from
+    # function globals)
+    import os
+    pickle_file_list = []
+    for file in os.listdir(folderpath):
+        if file.endswith(".pkl"):
+            pickle_file_list.append(os.path.join(folderpath, file))
+
+    for i in pickle_file_list:
+        objectname = str(os.path.basename(i)).replace(".pkl", "")
+        objects = {objectname: ""}
+        globals()[objectname] = pickle.load(open(str(i), "rb"))
+    # First import preprocessor function to session from preprocessor.py
+    exec(open(os.path.join(folderpath, 'preprocessor.py')).read(), globals())
+    return preprocessor
+
+def get_runtimedata(runtimedata_s3_filename="runtime_data.json"):
+
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" + "/" + runtimedata_s3_filename)
+    runtime_data = json.load(obj.get()['Body'])
+
+    return runtime_data
+
+
+runtime_data = get_runtimedata(runtimedata_s3_filename="runtime_data.json")
+
+preprocessor_type = runtime_data["runtime_preprocessor"]
+
+runtime_model = runtime_data["runtime_model"]["name"]
+
+# Load model
+model = get_model_onnx(runtimemodel_s3_filename='runtime_model.onnx')
+
+# Load preprocessor
+preprocessor = get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip")
+
+
+def predict(event, model, preprocessor):
+    body = event["body"]
+    if isinstance(event["body"], six.string_types):
+        body = json.loads(event["body"])
+        print(body["data"])
+        bodynew = pd.Series(body["data"])
+    else:
+        print(body["data"])
+        bodynew = pd.Series(body["data"])
+    print(bodynew)
+
+    sess = model
+    def predict_classes(x):  # adjusted from keras github code
+        proba = x
+        if proba.shape[-1] > 1:
+            return proba.argmax(axis=-1)
+        else:
+            return (proba > 0.5).astype("int32")
+    input_name = sess.get_inputs()[0].name
+
+    input_data = preprocessor(bodynew).astype(np.float32)  # needs to be float32
+
+    res = sess.run(None, {input_name: input_data})
+    prob = res[0]
+    return prob.tolist()[0]
+
+def handler(event, context):
+    result = predict(event, model, preprocessor)
+    return {"statusCode": 200,
+            "headers": {
+                "Access-Control-Allow-Origin": "*",
+                "Access-Control-Allow-Credentials": True,
+                "Allow": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Headers": "*"
+            },
+            "body": json.dumps(result)}
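
Note: the $-prefixed fields in these runtime templates match Python's string.Template syntax; a sketch of how deployment code might fill them (the file path and values are illustrative, not the package's actual flow):

    from string import Template

    with open("aimodelshare/main/1B.txt") as f:
        template = Template(f.read())

    # Substituting yields the Python source that gets packaged into the Lambda.
    lambda_source = template.substitute(
        bucket_name="example-aimodelshare-bucket",
        unique_model_id="example_model_id",
    )
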
aimodelshare/main/2.txt
ADDED
@@ -0,0 +1,153 @@
+#Image Classification Prediction Runtime Code
+
+import boto3
+import cv2
+import os
+import numpy as np
+import json
+import onnxruntime as rt
+import base64
+import imghdr
+import six
+from functools import partial
+import os.path
+from os import path
+
+def get_model_onnx(runtimemodel_s3_filename="runtime_model.onnx"):
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" +
+                    "/runtime_model.onnx")
+    model = rt.InferenceSession(obj.get()['Body'].read())
+    return model
+
+def get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip"):
+    import pickle
+    from zipfile import ZipFile
+    from io import BytesIO
+    import os
+    s3 = boto3.resource("s3")
+    bucket = s3.Bucket("$bucket_name")
+
+    zip_obj = s3.Object(bucket_name="$bucket_name", key="$unique_model_id/runtime_preprocessor.zip")
+    buffer = BytesIO(zip_obj.get()["Body"].read())
+    z = ZipFile(buffer)
+    # Extract all the contents of the zip file into /tmp/
+    z.extractall("/tmp/")
+
+    folderpath = os.path.dirname(os.path.abspath("/tmp/preprocessor.py"))
+    file_name = os.path.basename("/tmp/preprocessor.py")
+
+    # Then import all pkl files you want from bucket (need to generate this list from
+    # function globals)
+    import os
+    pickle_file_list = []
+    for file in os.listdir(folderpath):
+        if file.endswith(".pkl"):
+            pickle_file_list.append(os.path.join(folderpath, file))
+
+    for i in pickle_file_list:
+        objectname = str(os.path.basename(i)).replace(".pkl", "")
+        objects = {objectname: ""}
+        globals()[objectname] = pickle.load(open(str(i), "rb"))
+    # First import preprocessor function to session from preprocessor.py
+    exec(open(os.path.join(folderpath, 'preprocessor.py')).read(), globals())
+    return preprocessor
+
+
+def get_runtimedata(runtimedata_s3_filename="runtime_data.json"):
+
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" + "/" + runtimedata_s3_filename)
+    runtime_data = json.load(obj.get()['Body'])
+
+    return runtime_data
+
+
+runtime_data = get_runtimedata(runtimedata_s3_filename="runtime_data.json")
+
+preprocessor_type = runtime_data["runtime_preprocessor"]
+
+runtime_model = runtime_data["runtime_model"]["name"]
+
+model = get_model_onnx(runtimemodel_s3_filename='runtime_model.onnx')
+
+# Load preprocessor
+
+preprocessor = get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip")
+
+
+def predict(event, model, preprocessor):
+
+    # load class labels
+    try:
+        s3 = boto3.resource('s3')
+        obj = s3.Object("$bucket_name", "$unique_model_id" + "/labels.json")
+        labels = json.loads(obj.get()['Body'].read())
+    except:
+        labels = $labels
+
+
+    # Load base64 encoded image stored within "data" key of event dictionary
+    print(event["body"])
+    body = event["body"]
+    if isinstance(event["body"], six.string_types):
+        body = json.loads(event["body"])
+
+    bodydata = body["data"]
+
+    # Extract image file extension (e.g. jpg, png, etc.)
+    sample = base64.decodebytes(bytearray(bodydata, "utf-8"))
+
+    for tf in imghdr.tests:
+        image_file_type = tf(sample, None)
+        if image_file_type:
+            break
+    image_file_type = image_file_type
+
+    if image_file_type is None:
+        print("This file is not an image, please submit a base64 encoded image file.")
+
+    # Save image to local file, read into session, and preprocess image with preprocessor function
+
+    with open("/tmp/imagetopredict." + image_file_type, "wb") as fh:
+        fh.write(base64.b64decode(bodydata))
+
+    input_data = preprocessor("/tmp/imagetopredict." + image_file_type)
+
+
+    # Generate prediction using preprocessed input data
+    print("The model expects input shape:", model.get_inputs()[0].shape)
+
+    input_name = model.get_inputs()[0].name
+
+    res = model.run(None, {input_name: input_data})
+
+    # extract predicted probability for all classes, extract predicted label
+
+    prob = res[0]
+
+    def predict_classes(x):  # adjusted from keras github code
+        if len(x.shape) == 2:
+            index = x.argmax(axis=-1)
+            return list(map(lambda x: labels[x], index))
+        else:
+            return list(x)
+
+    result = predict_classes(prob)
+
+    os.remove("/tmp/imagetopredict." + image_file_type)
+
+    return result
+
+def handler(event, context):
+
+    result = predict(event, model, preprocessor)
+    return {"statusCode": 200,
+            "headers": {
+                "Access-Control-Allow-Origin": "*",
+                "Access-Control-Allow-Credentials": True,
+                "Allow": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Headers": "*"
+            },
+            "body": json.dumps(result)}
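
Note: the image handler expects a base64-encoded image string under the "data" key. A sketch of building that payload on the client side (the endpoint URL is a placeholder, and any auth headers the deployed API requires are omitted):

    import base64
    import json
    import requests

    with open("example.jpg", "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")

    # POST the JSON body that the handler's json.loads(event["body"]) expects
    response = requests.post(
        "https://<api-id>.execute-api.<region>.amazonaws.com/prod/m",  # placeholder
        headers={"Content-Type": "application/json"},
        data=json.dumps({"data": encoded}),
    )
    print(response.json())
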
aimodelshare/main/3.txt
ADDED
@@ -0,0 +1,134 @@
+#Image Regression Prediction Runtime Code
+
+import boto3
+import cv2
+import os
+import numpy as np
+import json
+import onnxruntime as rt
+import base64
+import imghdr
+import six
+from functools import partial
+import os.path
+from os import path
+
+
+def get_model_onnx(runtimemodel_s3_filename="runtime_model.onnx"):
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" +
+                    "/runtime_model.onnx")
+    model = rt.InferenceSession(obj.get()['Body'].read())
+    return model
+
+def get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip"):
+    import pickle
+    from zipfile import ZipFile
+    from io import BytesIO
+    import os
+    s3 = boto3.resource("s3")
+    bucket = s3.Bucket("$bucket_name")
+
+    zip_obj = s3.Object(bucket_name="$bucket_name", key="$unique_model_id/runtime_preprocessor.zip")
+    buffer = BytesIO(zip_obj.get()["Body"].read())
+    z = ZipFile(buffer)
+    # Extract all the contents of the zip file into /tmp/
+    z.extractall("/tmp/")
+
+    folderpath = os.path.dirname(os.path.abspath("/tmp/preprocessor.py"))
+    file_name = os.path.basename("/tmp/preprocessor.py")
+
+    # Then import all pkl files you want from bucket (need to generate this list from
+    # function globals)
+    import os
+    pickle_file_list = []
+    for file in os.listdir(folderpath):
+        if file.endswith(".pkl"):
+            pickle_file_list.append(os.path.join(folderpath, file))
+
+    for i in pickle_file_list:
+        objectname = str(os.path.basename(i)).replace(".pkl", "")
+        objects = {objectname: ""}
+        globals()[objectname] = pickle.load(open(str(i), "rb"))
+    # First import preprocessor function to session from preprocessor.py
+    exec(open(os.path.join(folderpath, 'preprocessor.py')).read(), globals())
+    return preprocessor
+
+
+def get_runtimedata(runtimedata_s3_filename="runtime_data.json"):
+
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" + "/" + runtimedata_s3_filename)
+    runtime_data = json.load(obj.get()['Body'])
+
+    return runtime_data
+
+
+runtime_data = get_runtimedata(runtimedata_s3_filename="runtime_data.json")
+
+preprocessor_type = runtime_data["runtime_preprocessor"]
+
+runtime_model = runtime_data["runtime_model"]["name"]
+
+model = get_model_onnx(runtimemodel_s3_filename='runtime_model.onnx')
+
+# Load preprocessor
+
+preprocessor = get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip")
+
+
+def predict(event, model, preprocessor):
+
+    # Load base64 encoded image stored within "data" key of event dictionary
+    print(event["body"])
+    body = event["body"]
+    if isinstance(event["body"], six.string_types):
+        body = json.loads(event["body"])
+
+    bodydata = body["data"]
+
+    # Extract image file extension (e.g. jpg, png, etc.)
+    sample = base64.decodebytes(bytearray(bodydata, "utf-8"))
+
+    for tf in imghdr.tests:
+        image_file_type = tf(sample, None)
+        if image_file_type:
+            break
+    image_file_type = image_file_type
+
+    if image_file_type is None:
+        print("This file is not an image, please submit a base64 encoded image file.")
+
+    # Save image to local file, read into session, and preprocess image with preprocessor function
+
+    with open("/tmp/imagetopredict." + image_file_type, "wb") as fh:
+        fh.write(base64.b64decode(bodydata))
+
+    input_data = preprocessor("/tmp/imagetopredict." + image_file_type)
+
+    # Generate prediction using preprocessed input data
+    print("The model expects input shape:", model.get_inputs()[0].shape)
+    input_name = model.get_inputs()[0].name
+
+    res = model.run(None, {input_name: input_data})
+
+    # extract predicted value
+
+    result = res[0].tolist()[0]
+
+    os.remove("/tmp/imagetopredict." + image_file_type)
+
+    return result
+
+def handler(event, context):
+
+    result = predict(event, model, preprocessor)
+    return {"statusCode": 200,
+            "headers": {
+                "Access-Control-Allow-Origin": "*",
+                "Access-Control-Allow-Credentials": True,
+                "Allow": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Headers": "*"
+            },
+            "body": json.dumps(result)}
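
Note: both image templates sniff the file type by iterating imghdr.tests, a list of detector functions that each take (bytes, file-or-None) and return a type string or None. An isolated demo of that loop (imghdr was removed from the standard library in Python 3.13, so this assumes an older runtime):

    import imghdr

    with open("example.png", "rb") as f:
        sample = f.read()

    image_file_type = None
    for tf in imghdr.tests:        # first detector that matches wins
        image_file_type = tf(sample, None)
        if image_file_type:
            break
    print(image_file_type)         # e.g. "png"
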
aimodelshare/main/4.txt
ADDED
@@ -0,0 +1,128 @@
+#Tabular Classification Prediction Runtime Code
+
+import boto3
+import pandas as pd
+import os
+import numpy as np
+import onnxruntime as rt
+import json
+
+class NpEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, np.integer):
+            return int(obj)
+        if isinstance(obj, np.floating):
+            return float(obj)
+        if isinstance(obj, np.ndarray):
+            return obj.tolist()
+        return super(NpEncoder, self).default(obj)
+
+def get_model_onnx(runtimemodel_s3_filename="runtime_model.onnx"):
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" +
+                    "/runtime_model.onnx")
+    model = rt.InferenceSession(obj.get()['Body'].read())
+    return model
+
+def get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip"):
+    import pickle
+    from zipfile import ZipFile
+    from io import BytesIO
+    import os
+    s3 = boto3.resource("s3")
+    bucket = s3.Bucket("$bucket_name")
+
+    zip_obj = s3.Object(bucket_name="$bucket_name", key="$unique_model_id/runtime_preprocessor.zip")
+    buffer = BytesIO(zip_obj.get()["Body"].read())
+    z = ZipFile(buffer)
+    # Extract all the contents of the zip file into /tmp/
+    z.extractall("/tmp/")
+
+    folderpath = os.path.dirname(os.path.abspath("/tmp/preprocessor.py"))
+    file_name = os.path.basename("/tmp/preprocessor.py")
+
+    # Then import all pkl files you want from bucket (need to generate this list from
+    # function globals)
+    import os
+    pickle_file_list = []
+    for file in os.listdir(folderpath):
+        if file.endswith(".pkl"):
+            pickle_file_list.append(os.path.join(folderpath, file))
+
+    for i in pickle_file_list:
+        objectname = str(os.path.basename(i)).replace(".pkl", "")
+        objects = {objectname: ""}
+        globals()[objectname] = pickle.load(open(str(i), "rb"))
+    # First import preprocessor function to session from preprocessor.py
+    exec(open(os.path.join(folderpath, 'preprocessor.py')).read(), globals())
+    return preprocessor
+
+def get_runtimedata(runtimedata_s3_filename="runtime_data.json"):
+
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" + "/" + runtimedata_s3_filename)
+    runtime_data = json.load(obj.get()['Body'])
+
+    return runtime_data
+
+
+runtime_data = get_runtimedata(runtimedata_s3_filename="runtime_data.json")
+
+preprocessor_type = runtime_data["runtime_preprocessor"]
+
+runtime_model = runtime_data["runtime_model"]["name"]
+
+# Load model
+model = get_model_onnx(runtimemodel_s3_filename='runtime_model.onnx')
+
+# Load preprocessor
+preprocessor = get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip")
+
+
+def predict(event, model, preprocessor):
+    body = event["body"]
+
+    # load class labels
+    try:
+        s3 = boto3.resource('s3')
+        obj = s3.Object("$bucket_name", "$unique_model_id" + "/labels.json")
+        labels = json.loads(obj.get()['Body'].read())
+    except:
+        labels = $labels
+
+    import six
+    if isinstance(event["body"], six.string_types):
+        body = json.loads(event["body"])
+        print(body["data"])
+        bodydata = pd.DataFrame.from_dict(body["data"])
+    else:
+        print(body["data"])
+        bodydata = pd.DataFrame.from_dict(body["data"])
+    print(bodydata)
+
+    def predict_classes(x):  # adjusted from keras github code
+        if len(x.shape) == 2:
+            index = x.argmax(axis=-1)
+            return list(map(lambda x: labels[x], index))
+        else:
+            return list(x)
+    input_name = model.get_inputs()[0].name
+    print(input_name)
+    input_data = preprocessor(bodydata).astype('float32')  # needs to be float32
+    print(input_data)
+    res = model.run(None, {input_name: input_data})
+    prob = res[0]
+    print(prob)
+    return predict_classes(prob)
+
+def handler(event, context):
+    result = predict(event, model, preprocessor)
+    return {"statusCode": 200,
+            "headers": {
+                "Access-Control-Allow-Origin": "*",
+                "Access-Control-Allow-Credentials": True,
+                "Allow": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Headers": "*"
+            },
+            "body": json.dumps(result, cls=NpEncoder)}
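
Note: the tabular handler builds its input with pd.DataFrame.from_dict(body["data"]), so the request body should carry a column-oriented dict (column name -> list of values); the NpEncoder class exists because numpy scalars and arrays are not JSON-serializable by default. A sketch of a matching payload (column names are illustrative):

    import json
    import pandas as pd

    data = {"age": [42, 35], "income": [55000, 72000]}
    event = {"body": json.dumps({"data": data})}

    print(pd.DataFrame.from_dict(data))
    #    age  income
    # 0   42   55000
    # 1   35   72000
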