aimodelshare-0.3.7-py3-none-any.whl
This diff shows the content of a publicly available package version released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
- aimodelshare/README.md +26 -0
- aimodelshare/__init__.py +100 -0
- aimodelshare/aimsonnx.py +2381 -0
- aimodelshare/api.py +836 -0
- aimodelshare/auth.py +163 -0
- aimodelshare/aws.py +511 -0
- aimodelshare/aws_client.py +173 -0
- aimodelshare/base_image.py +154 -0
- aimodelshare/bucketpolicy.py +106 -0
- aimodelshare/color_mappings/color_mapping_keras.csv +121 -0
- aimodelshare/color_mappings/color_mapping_pytorch.csv +117 -0
- aimodelshare/containerisation.py +244 -0
- aimodelshare/containerization.py +712 -0
- aimodelshare/containerization_templates/Dockerfile.txt +8 -0
- aimodelshare/containerization_templates/Dockerfile_PySpark.txt +23 -0
- aimodelshare/containerization_templates/buildspec.txt +14 -0
- aimodelshare/containerization_templates/lambda_function.txt +40 -0
- aimodelshare/custom_approach/__init__.py +1 -0
- aimodelshare/custom_approach/lambda_function.py +17 -0
- aimodelshare/custom_eval_metrics.py +103 -0
- aimodelshare/data_sharing/__init__.py +0 -0
- aimodelshare/data_sharing/data_sharing_templates/Dockerfile.txt +3 -0
- aimodelshare/data_sharing/data_sharing_templates/__init__.py +1 -0
- aimodelshare/data_sharing/data_sharing_templates/buildspec.txt +15 -0
- aimodelshare/data_sharing/data_sharing_templates/codebuild_policies.txt +129 -0
- aimodelshare/data_sharing/data_sharing_templates/codebuild_trust_relationship.txt +12 -0
- aimodelshare/data_sharing/download_data.py +620 -0
- aimodelshare/data_sharing/share_data.py +373 -0
- aimodelshare/data_sharing/utils.py +8 -0
- aimodelshare/deploy_custom_lambda.py +246 -0
- aimodelshare/documentation/Makefile +20 -0
- aimodelshare/documentation/karma_sphinx_theme/__init__.py +28 -0
- aimodelshare/documentation/karma_sphinx_theme/_version.py +2 -0
- aimodelshare/documentation/karma_sphinx_theme/breadcrumbs.html +70 -0
- aimodelshare/documentation/karma_sphinx_theme/layout.html +172 -0
- aimodelshare/documentation/karma_sphinx_theme/search.html +50 -0
- aimodelshare/documentation/karma_sphinx_theme/searchbox.html +14 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css +2 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css +2751 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css +2 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.eot +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.svg +32 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.ttf +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff2 +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/js/theme.js +68 -0
- aimodelshare/documentation/karma_sphinx_theme/theme.conf +9 -0
- aimodelshare/documentation/make.bat +35 -0
- aimodelshare/documentation/requirements.txt +2 -0
- aimodelshare/documentation/source/about.rst +18 -0
- aimodelshare/documentation/source/advanced_features.rst +137 -0
- aimodelshare/documentation/source/competition.rst +218 -0
- aimodelshare/documentation/source/conf.py +58 -0
- aimodelshare/documentation/source/create_credentials.rst +86 -0
- aimodelshare/documentation/source/example_notebooks.rst +132 -0
- aimodelshare/documentation/source/functions.rst +151 -0
- aimodelshare/documentation/source/gettingstarted.rst +390 -0
- aimodelshare/documentation/source/images/creds1.png +0 -0
- aimodelshare/documentation/source/images/creds2.png +0 -0
- aimodelshare/documentation/source/images/creds3.png +0 -0
- aimodelshare/documentation/source/images/creds4.png +0 -0
- aimodelshare/documentation/source/images/creds5.png +0 -0
- aimodelshare/documentation/source/images/creds_file_example.png +0 -0
- aimodelshare/documentation/source/images/predict_tab.png +0 -0
- aimodelshare/documentation/source/index.rst +110 -0
- aimodelshare/documentation/source/modelplayground.rst +132 -0
- aimodelshare/exceptions.py +11 -0
- aimodelshare/generatemodelapi.py +1270 -0
- aimodelshare/iam/codebuild_policy.txt +129 -0
- aimodelshare/iam/codebuild_trust_relationship.txt +12 -0
- aimodelshare/iam/lambda_policy.txt +15 -0
- aimodelshare/iam/lambda_trust_relationship.txt +12 -0
- aimodelshare/json_templates/__init__.py +1 -0
- aimodelshare/json_templates/api_json.txt +155 -0
- aimodelshare/json_templates/auth/policy.txt +1 -0
- aimodelshare/json_templates/auth/role.txt +1 -0
- aimodelshare/json_templates/eval/policy.txt +1 -0
- aimodelshare/json_templates/eval/role.txt +1 -0
- aimodelshare/json_templates/function/policy.txt +1 -0
- aimodelshare/json_templates/function/role.txt +1 -0
- aimodelshare/json_templates/integration_response.txt +5 -0
- aimodelshare/json_templates/lambda_policy_1.txt +15 -0
- aimodelshare/json_templates/lambda_policy_2.txt +8 -0
- aimodelshare/json_templates/lambda_role_1.txt +12 -0
- aimodelshare/json_templates/lambda_role_2.txt +16 -0
- aimodelshare/leaderboard.py +174 -0
- aimodelshare/main/1.txt +132 -0
- aimodelshare/main/1B.txt +112 -0
- aimodelshare/main/2.txt +153 -0
- aimodelshare/main/3.txt +134 -0
- aimodelshare/main/4.txt +128 -0
- aimodelshare/main/5.txt +109 -0
- aimodelshare/main/6.txt +105 -0
- aimodelshare/main/7.txt +144 -0
- aimodelshare/main/8.txt +142 -0
- aimodelshare/main/__init__.py +1 -0
- aimodelshare/main/authorization.txt +275 -0
- aimodelshare/main/eval_classification.txt +79 -0
- aimodelshare/main/eval_lambda.txt +1709 -0
- aimodelshare/main/eval_regression.txt +80 -0
- aimodelshare/main/lambda_function.txt +8 -0
- aimodelshare/main/nst.txt +149 -0
- aimodelshare/model.py +1543 -0
- aimodelshare/modeluser.py +215 -0
- aimodelshare/moral_compass/README.md +408 -0
- aimodelshare/moral_compass/__init__.py +65 -0
- aimodelshare/moral_compass/_version.py +3 -0
- aimodelshare/moral_compass/api_client.py +601 -0
- aimodelshare/moral_compass/apps/__init__.py +69 -0
- aimodelshare/moral_compass/apps/ai_consequences.py +540 -0
- aimodelshare/moral_compass/apps/bias_detective.py +714 -0
- aimodelshare/moral_compass/apps/ethical_revelation.py +898 -0
- aimodelshare/moral_compass/apps/fairness_fixer.py +889 -0
- aimodelshare/moral_compass/apps/judge.py +888 -0
- aimodelshare/moral_compass/apps/justice_equity_upgrade.py +853 -0
- aimodelshare/moral_compass/apps/mc_integration_helpers.py +820 -0
- aimodelshare/moral_compass/apps/model_building_game.py +1104 -0
- aimodelshare/moral_compass/apps/model_building_game_beginner.py +687 -0
- aimodelshare/moral_compass/apps/moral_compass_challenge.py +858 -0
- aimodelshare/moral_compass/apps/session_auth.py +254 -0
- aimodelshare/moral_compass/apps/shared_activity_styles.css +349 -0
- aimodelshare/moral_compass/apps/tutorial.py +481 -0
- aimodelshare/moral_compass/apps/what_is_ai.py +853 -0
- aimodelshare/moral_compass/challenge.py +365 -0
- aimodelshare/moral_compass/config.py +187 -0
- aimodelshare/placeholders/model.onnx +0 -0
- aimodelshare/placeholders/preprocessor.zip +0 -0
- aimodelshare/playground.py +1968 -0
- aimodelshare/postprocessormodules.py +157 -0
- aimodelshare/preprocessormodules.py +373 -0
- aimodelshare/pyspark/1.txt +195 -0
- aimodelshare/pyspark/1B.txt +181 -0
- aimodelshare/pyspark/2.txt +220 -0
- aimodelshare/pyspark/3.txt +204 -0
- aimodelshare/pyspark/4.txt +187 -0
- aimodelshare/pyspark/5.txt +178 -0
- aimodelshare/pyspark/6.txt +174 -0
- aimodelshare/pyspark/7.txt +211 -0
- aimodelshare/pyspark/8.txt +206 -0
- aimodelshare/pyspark/__init__.py +1 -0
- aimodelshare/pyspark/authorization.txt +258 -0
- aimodelshare/pyspark/eval_classification.txt +79 -0
- aimodelshare/pyspark/eval_lambda.txt +1441 -0
- aimodelshare/pyspark/eval_regression.txt +80 -0
- aimodelshare/pyspark/lambda_function.txt +8 -0
- aimodelshare/pyspark/nst.txt +213 -0
- aimodelshare/python/my_preprocessor.py +58 -0
- aimodelshare/readme.md +26 -0
- aimodelshare/reproducibility.py +181 -0
- aimodelshare/sam/Dockerfile.txt +8 -0
- aimodelshare/sam/Dockerfile_PySpark.txt +24 -0
- aimodelshare/sam/__init__.py +1 -0
- aimodelshare/sam/buildspec.txt +11 -0
- aimodelshare/sam/codebuild_policies.txt +129 -0
- aimodelshare/sam/codebuild_trust_relationship.txt +12 -0
- aimodelshare/sam/codepipeline_policies.txt +173 -0
- aimodelshare/sam/codepipeline_trust_relationship.txt +12 -0
- aimodelshare/sam/spark-class.txt +2 -0
- aimodelshare/sam/template.txt +54 -0
- aimodelshare/tools.py +103 -0
- aimodelshare/utils/__init__.py +78 -0
- aimodelshare/utils/optional_deps.py +38 -0
- aimodelshare/utils.py +57 -0
- aimodelshare-0.3.7.dist-info/METADATA +298 -0
- aimodelshare-0.3.7.dist-info/RECORD +171 -0
- aimodelshare-0.3.7.dist-info/WHEEL +5 -0
- aimodelshare-0.3.7.dist-info/licenses/LICENSE +5 -0
- aimodelshare-0.3.7.dist-info/top_level.txt +1 -0
aimodelshare/main/5.txt
ADDED
@@ -0,0 +1,109 @@
+#Tabular Regression Prediction Runtime Code
+
+import boto3
+import pandas as pd
+import os
+import numpy as np
+import onnxruntime as rt
+import json
+
+def get_model_onnx(runtimemodel_s3_filename="runtime_model.onnx"):
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" +
+                    "/runtime_model.onnx")
+    model = rt.InferenceSession(obj.get()['Body'].read())
+    return model
+
+def get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip"):
+    import pickle
+    from zipfile import ZipFile
+    from io import BytesIO
+    import os
+    s3 = boto3.resource("s3")
+    bucket = s3.Bucket("$bucket_name")
+
+    zip_obj = s3.Object(bucket_name="$bucket_name", key="$unique_model_id/runtime_preprocessor.zip")
+    buffer = BytesIO(zip_obj.get()["Body"].read())
+    z = ZipFile(buffer)
+    # Extract all the contents of zip file in current directory
+    z.extractall("/tmp/")
+
+    folderpath=os.path.dirname(os.path.abspath("/tmp/preprocessor.py"))
+    file_name=os.path.basename("/tmp/preprocessor.py")
+
+    #Then import all pkl files you want from bucket (need to generate this list from...
+    # function globals)
+    import os
+    pickle_file_list=[]
+    for file in os.listdir(folderpath):
+        if file.endswith(".pkl"):
+            pickle_file_list.append(os.path.join(folderpath, file))
+
+    for i in pickle_file_list:
+        objectname=str(os.path.basename(i)).replace(".pkl","")
+        objects={objectname:""}
+        globals()[objectname]=pickle.load(open(str(i), "rb" ) )
+    # First import preprocessor function to session from preprocessor.py
+    exec(open(os.path.join(folderpath,'preprocessor.py')).read(),globals())
+    return preprocessor
+
+def get_runtimedata(runtimedata_s3_filename="runtime_data.json"):
+
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id"+"/"+runtimedata_s3_filename)
+    runtime_data = json.load(obj.get()['Body'])
+
+    return runtime_data
+
+
+runtime_data=get_runtimedata(runtimedata_s3_filename="runtime_data.json")
+
+preprocessor_type=runtime_data["runtime_preprocessor"]
+
+runtime_model=runtime_data["runtime_model"]["name"]
+
+# Load model
+model=get_model_onnx(runtimemodel_s3_filename='runtime_model.onnx')
+
+# Load preprocessor
+preprocessor=get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip")
+
+
+def predict(event,model,preprocessor):
+    body = event["body"]
+    import six
+    if isinstance(event["body"], six.string_types):
+        body = json.loads(event["body"])
+        print(body["data"])
+        bodydata = pd.DataFrame.from_dict(body["data"])
+    else:
+        print(body["data"])
+        bodydata = pd.DataFrame.from_dict(body["data"])
+    print(bodydata)
+
+    input_name = model.get_inputs()[0].name
+    print(input_name)
+
+    #Preprocess data
+    input_data = preprocessor(bodydata).astype('float32') #needs to be float32
+    print(input_data)
+
+    # Generate prediction using preprocessed input data
+
+    res=model.run(None, {input_name: input_data})
+
+    result = res[0].tolist()[0]
+
+    return result
+
+def handler(event, context):
+    result = predict(event,model,preprocessor)
+    return {"statusCode": 200,
+            "headers": {
+                "Access-Control-Allow-Origin" : "*",
+                "Access-Control-Allow-Credentials": True,
+                "Allow" : "GET, OPTIONS, POST",
+                "Access-Control-Allow-Methods" : "GET, OPTIONS, POST",
+                "Access-Control-Allow-Headers" : "*"
+            },
+            "body": json.dumps(result)}
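For reference, a minimal sketch (not part of the package) of the request this tabular regression handler accepts: API Gateway delivers the POST body as a JSON string whose "data" key holds a dict-of-lists that pandas can turn into a DataFrame. The column names below are hypothetical; the real ones come from the deployed preprocessor's schema.

import json

# Hypothetical feature columns for two rows of input data.
payload = {"data": {"feature_a": [1.2, 3.4], "feature_b": [5.6, 7.8]}}

# API Gateway passes the body through as a string, so predict() detects a
# string body and json.loads() it before building the DataFrame.
event = {"body": json.dumps(payload)}
# handler(event, None) would then return the prediction with CORS headers.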
aimodelshare/main/6.txt
ADDED
@@ -0,0 +1,105 @@
+# Tabular time series regression
+
+import boto3
+import pandas as pd
+import os
+import numpy as np
+import onnxruntime as rt
+import json
+
+def get_model_onnx(runtimemodel_s3_filename="runtime_model.onnx"):
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" +
+                    "/runtime_model.onnx")
+    model = rt.InferenceSession(obj.get()['Body'].read())
+    return model
+
+def get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip"):
+    import pickle
+    from zipfile import ZipFile
+    from io import BytesIO
+    import os
+    s3 = boto3.resource("s3")
+    bucket = s3.Bucket("$bucket_name")
+
+    zip_obj = s3.Object(bucket_name="$bucket_name", key="$unique_model_id/runtime_preprocessor.zip")
+    buffer = BytesIO(zip_obj.get()["Body"].read())
+    z = ZipFile(buffer)
+    # Extract all the contents of zip file in current directory
+    z.extractall("/tmp/")
+
+    folderpath=os.path.dirname(os.path.abspath("/tmp/preprocessor.py"))
+    file_name=os.path.basename("/tmp/preprocessor.py")
+
+    #Then import all pkl files you want from bucket (need to generate this list from...
+    # function globals)
+    import os
+    pickle_file_list=[]
+    for file in os.listdir(folderpath):
+        if file.endswith(".pkl"):
+            pickle_file_list.append(os.path.join(folderpath, file))
+
+    for i in pickle_file_list:
+        objectname=str(os.path.basename(i)).replace(".pkl","")
+        objects={objectname:""}
+        globals()[objectname]=pickle.load(open(str(i), "rb" ) )
+    # First import preprocessor function to session from preprocessor.py
+    exec(open(os.path.join(folderpath,'preprocessor.py')).read(),globals())
+    return preprocessor
+
+def get_runtimedata(runtimedata_s3_filename="runtime_data.json"):
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id"+"/"+runtimedata_s3_filename)
+    runtime_data = json.load(obj.get()['Body'])
+
+    return runtime_data
+
+
+runtime_data = get_runtimedata(runtimedata_s3_filename="runtime_data.json")
+
+preprocessor_type = runtime_data["runtime_preprocessor"]
+
+runtime_model = runtime_data["runtime_model"]["name"]
+
+# Load model
+model = get_model_onnx(runtimemodel_s3_filename='runtime_model.onnx')
+
+# Load preprocessor
+preprocessor = get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip")
+
+def predict(event, model, preprocessor):
+    body = event["body"]
+    import six
+    if isinstance(event["body"], six.string_types):
+        body = json.loads(event["body"])
+        print(body["data"])
+        bodydata = pd.DataFrame.from_dict(body["data"])
+    else:
+        print(body["data"])
+        bodydata = pd.DataFrame.from_dict(body["data"])
+    print(bodydata)
+
+    try:
+        input_data = preprocessor(bodydata)
+    except:
+        input_data = preprocessor(bodydata).astype(np.float32).toarray()
+
+    # generate prediction using preprocessed input data
+    input_name = model.get_inputs()[0].name
+
+    res = model.run(None, {input_name: input_data})
+
+    return res[0][0].tolist()
+
+
+def handler(event, context):
+    result = predict(event, model, preprocessor)
+    return {"statusCode": 200,
+            "headers": {
+                "Access-Control-Allow-Origin": "*",
+                "Access-Control-Allow-Credentials": True,
+                "Allow": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Headers": "*"
+            },
+            "body": json.dumps(result)}
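The try/except around the preprocessor call in 6.txt appears intended for preprocessors whose output onnxruntime cannot consume directly, such as the SciPy sparse matrices that scikit-learn transformers like OneHotEncoder can return. A standalone sketch of that conversion (not package code):

import numpy as np
from scipy.sparse import csr_matrix

# A sparse feature matrix of the kind a sparse-output transformer produces.
sparse_features = csr_matrix(np.array([[0.0, 1.0], [2.0, 0.0]]))

# onnxruntime expects a dense float32 ndarray, hence astype + toarray.
dense_input = sparse_features.astype(np.float32).toarray()
print(dense_input.dtype, dense_input.shape)  # float32 (2, 2)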
aimodelshare/main/7.txt
ADDED
@@ -0,0 +1,144 @@
+# audio time series classification
+
+import boto3
+import os
+import numpy as np
+import json
+import onnxruntime as rt
+import base64
+import six
+from functools import partial
+import os.path
+from os import path
+
+
+def get_model_onnx(runtimemodel_s3_filename="runtime_model.onnx"):
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" +
+                    "/runtime_model.onnx")
+    model = rt.InferenceSession(obj.get()['Body'].read())
+    return model
+
+def get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip"):
+    import pickle
+    from zipfile import ZipFile
+    from io import BytesIO
+    import os
+    s3 = boto3.resource("s3")
+    bucket = s3.Bucket("$bucket_name")
+
+    zip_obj = s3.Object(bucket_name="$bucket_name", key="$unique_model_id/runtime_preprocessor.zip")
+    buffer = BytesIO(zip_obj.get()["Body"].read())
+    z = ZipFile(buffer)
+    # Extract all the contents of zip file in current directory
+    z.extractall("/tmp/")
+
+    folderpath=os.path.dirname(os.path.abspath("/tmp/preprocessor.py"))
+    file_name=os.path.basename("/tmp/preprocessor.py")
+
+    #Then import all pkl files you want from bucket (need to generate this list from...
+    # function globals)
+    import os
+    pickle_file_list=[]
+    for file in os.listdir(folderpath):
+        if file.endswith(".pkl"):
+            pickle_file_list.append(os.path.join(folderpath, file))
+
+    for i in pickle_file_list:
+        objectname=str(os.path.basename(i)).replace(".pkl","")
+        objects={objectname:""}
+        globals()[objectname]=pickle.load(open(str(i), "rb" ) )
+    # First import preprocessor function to session from preprocessor.py
+    exec(open(os.path.join(folderpath,'preprocessor.py')).read(),globals())
+    return preprocessor
+
+
+def get_runtimedata(runtimedata_s3_filename="runtime_data.json"):
+
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id"+"/"+runtimedata_s3_filename)
+    runtime_data = json.load(obj.get()['Body'])
+
+    return runtime_data
+
+
+runtime_data=get_runtimedata(runtimedata_s3_filename="runtime_data.json")
+
+# preprocessor_type=runtime_data["runtime_preprocessor"]
+
+runtime_model=runtime_data["runtime_model"]["name"]
+
+model=get_model_onnx(runtimemodel_s3_filename='runtime_model.onnx')
+
+# Load preprocessor
+
+preprocessor=get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip")
+
+def predict(event,model,preprocessor):
+
+    # Load base64 encoded audio stored within "data" key of event dictionary
+    print(event["body"])
+    body = event["body"]
+    if isinstance(event["body"], six.string_types):
+        body = json.loads(event["body"])
+    # only supporting wav extension as of now
+    extension = body['extension']
+    bodydata=body["data"]
+
+
+    sample = base64.decodebytes(bytearray(bodydata, "utf-8"))
+
+    # Save audio to local file, read into session, and preprocess image with preprocessor function
+    with open("/tmp/audiotopredict."+extension, "wb") as fh:
+        fh.write(base64.b64decode(bodydata))
+
+
+    input_data = preprocessor(f"/tmp/audiotopredict.{extension}")
+
+    # Generate prediction using preprocessed input data
+    print("The model expects input shape:", model.get_inputs()[0].shape)
+
+    input_name = model.get_inputs()[0].name
+
+    input_data = np.float32(input_data)
+
+    res = model.run(None, {input_name: input_data})
+
+    #extract predicted probability for all classes, extract predicted label
+
+    prob = res[0]
+
+    def predict_classes(x):
+        proba=x
+        if proba.shape[-1] > 1:
+            return proba.argmax(axis=-1)
+        else:
+            return (proba > 0.5).astype("int32")
+
+    prediction_index=predict_classes(prob)
+
+    # load class labels
+    try:
+        s3 = boto3.resource('s3')
+        obj = s3.Object("$bucket_name", "$unique_model_id" + "/labels.json")
+        labels = json.loads(obj.get()['Body'].read())
+    except:
+        labels=$labels
+
+    result=list(map(lambda x: labels[x], prediction_index))
+
+    os.remove("/tmp/audiotopredict."+extension)
+
+    return result
+
+def handler(event, context):
+    result = predict(event,model,preprocessor)
+    return {"statusCode": 200,
+            "headers": {
+                "Access-Control-Allow-Origin" : "*",
+                "Access-Control-Allow-Credentials": True,
+                "Allow" : "GET, OPTIONS, POST",
+                "Access-Control-Allow-Methods" : "GET, OPTIONS, POST",
+                "Access-Control-Allow-Headers" : "*"
+            },
+            "body" : json.dumps(result)}
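A sketch (not package code) of how a client would assemble the payload this audio handler expects: the raw bytes base64-encoded into "data", plus an "extension" key, which the in-code comment limits to wav for now. The file path below is hypothetical.

import base64
import json

# Hypothetical local file; the handler writes it back out under /tmp
# before calling the preprocessor on the path.
with open("sample.wav", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("utf-8")

event = {"body": json.dumps({"data": encoded, "extension": "wav"})}
# handler(event, None) decodes this to /tmp, preprocesses it, runs the
# ONNX session, and maps the predicted index to a label from labels.json.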
aimodelshare/main/8.txt
ADDED
@@ -0,0 +1,142 @@
+# video classification
+import boto3
+import os
+import numpy as np
+import json
+import onnxruntime as rt
+import base64
+import six
+from os import path
+
+import json
+import warnings
+import numpy as np
+
+def get_model_onnx(runtimemodel_s3_filename="runtime_model.onnx"):
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" +
+                    "/runtime_model.onnx")
+    model = rt.InferenceSession(obj.get()['Body'].read())
+    return model
+
+def get_runtimedata(runtimedata_s3_filename="runtime_data.json"):
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" +
+                    "/"+runtimedata_s3_filename)
+    runtime_data = json.load(obj.get()['Body'])
+
+    return runtime_data
+
+
+def get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip"):
+    import pickle
+    from zipfile import ZipFile
+    from io import BytesIO
+    import os
+    s3 = boto3.resource("s3")
+    bucket = s3.Bucket("$bucket_name")
+
+    zip_obj = s3.Object(bucket_name="$bucket_name",
+                        key="$unique_model_id/runtime_preprocessor.zip")
+    buffer = BytesIO(zip_obj.get()["Body"].read())
+    z = ZipFile(buffer)
+    # Extract all the contents of zip file in current directory
+    z.extractall("/tmp/")
+
+    folderpath = os.path.dirname(os.path.abspath("/tmp/preprocessor.py"))
+    file_name = os.path.basename("/tmp/preprocessor.py")
+
+    #Then import all pkl files you want from bucket (need to generate this list from...
+    # function globals)
+    import os
+    pickle_file_list = []
+    for file in os.listdir(folderpath):
+        if file.endswith(".pkl"):
+            pickle_file_list.append(os.path.join(folderpath, file))
+
+    for i in pickle_file_list:
+        objectname = str(os.path.basename(i)).replace(".pkl", "")
+        objects = {objectname: ""}
+        globals()[objectname] = pickle.load(open(str(i), "rb"))
+    # First import preprocessor function to session from preprocessor.py
+    exec(open(os.path.join(folderpath, 'preprocessor.py')).read(), globals())
+    return preprocessor
+
+
+def predict(event, model, preprocessor):
+
+    # Load base64 encoded /. stored within "data" key of event dictionary
+    # print(event["body"])
+    body = event["body"]
+    if isinstance(event["body"], six.string_types):
+        body = json.loads(event["body"])
+    # only supporting wav extension as of now
+    extension = body['extension']
+    bodydata = body["data"]
+
+    sample = base64.decodebytes(bytearray(bodydata, "utf-8"))
+
+    # Save video to local file, read into session, and preprocess image with preprocessor function
+    with open("/tmp/videotopredict."+extension, "wb") as fh:
+        fh.write(base64.b64decode(bodydata))
+
+    input_data = preprocessor(f"/tmp/videotopredict.{extension}")
+
+    # Generate prediction using preprocessed input data
+    print("The model expects input shape:", model.get_inputs()[0].shape)
+
+    input_name = model.get_inputs()[0].name
+    input_data = np.float32(input_data)
+
+    res = model.run(None, {input_name: input_data})
+
+    # extract predicted probability for all classes, extract predicted label
+
+    prob = res[0]
+
+    def predict_classes(x):
+        proba = x
+        if proba.shape[-1] > 1:
+            return proba.argmax(axis=-1)
+        else:
+            return (proba > 0.5).astype("int32")
+
+    prediction_index = predict_classes(prob)
+
+    # load class labels
+    try:
+        s3 = boto3.resource('s3')
+        obj = s3.Object("$bucket_name", "$unique_model_id" + "/labels.json")
+        labels = json.loads(obj.get()['Body'].read())
+    except:
+        labels=$labels
+
+    result = list(map(lambda x: labels[x], prediction_index))
+
+    os.remove("/tmp/videotopredict."+extension)
+
+    return result
+
+
+runtime_data = get_runtimedata(runtimedata_s3_filename="runtime_data.json")
+
+runtime_model = runtime_data["runtime_model"]["name"]
+
+model = get_model_onnx(runtimemodel_s3_filename='runtime_model.onnx')
+
+# Load preprocessor
+preprocessor = get_preprocessor(
+    preprocessor_s3_filename="runtime_preprocessor.zip")
+
+
+def handler(event, context):
+    result = predict(event, model, preprocessor)
+    return {"statusCode": 200,
+            "headers": {
+                "Access-Control-Allow-Origin": "*",
+                "Access-Control-Allow-Credentials": True,
+                "Allow": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Headers": "*"
+            },
+            "body": json.dumps(result)}
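The predict_classes helper shared by 7.txt and 8.txt covers the two usual shapes of classifier output; a standalone illustration (not package code):

import numpy as np

multiclass_probs = np.array([[0.1, 0.7, 0.2]])
binary_prob = np.array([[0.8]])

# Several probability columns: take the index of the largest one.
print(multiclass_probs.argmax(axis=-1))      # [1]

# A single probability column: threshold at 0.5 for the positive class.
print((binary_prob > 0.5).astype("int32"))   # [[1]]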
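Across these templates, $bucket_name, $unique_model_id, and $labels look like Python string.Template placeholders that the package presumably substitutes before deploying the Lambda source; this diff does not show that step, so the sketch below is an assumption with hypothetical values.

from string import Template

# Read one of the runtime templates and fill in its placeholders.
with open("aimodelshare/main/5.txt") as f:
    template = Template(f.read())

lambda_source = template.substitute(
    bucket_name="example-bucket",         # hypothetical
    unique_model_id="example-model-id",   # hypothetical
)
# 7.txt and 8.txt would additionally need a value for $labels.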