aimodelshare-0.3.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (171)
  1. aimodelshare/README.md +26 -0
  2. aimodelshare/__init__.py +100 -0
  3. aimodelshare/aimsonnx.py +2381 -0
  4. aimodelshare/api.py +836 -0
  5. aimodelshare/auth.py +163 -0
  6. aimodelshare/aws.py +511 -0
  7. aimodelshare/aws_client.py +173 -0
  8. aimodelshare/base_image.py +154 -0
  9. aimodelshare/bucketpolicy.py +106 -0
  10. aimodelshare/color_mappings/color_mapping_keras.csv +121 -0
  11. aimodelshare/color_mappings/color_mapping_pytorch.csv +117 -0
  12. aimodelshare/containerisation.py +244 -0
  13. aimodelshare/containerization.py +712 -0
  14. aimodelshare/containerization_templates/Dockerfile.txt +8 -0
  15. aimodelshare/containerization_templates/Dockerfile_PySpark.txt +23 -0
  16. aimodelshare/containerization_templates/buildspec.txt +14 -0
  17. aimodelshare/containerization_templates/lambda_function.txt +40 -0
  18. aimodelshare/custom_approach/__init__.py +1 -0
  19. aimodelshare/custom_approach/lambda_function.py +17 -0
  20. aimodelshare/custom_eval_metrics.py +103 -0
  21. aimodelshare/data_sharing/__init__.py +0 -0
  22. aimodelshare/data_sharing/data_sharing_templates/Dockerfile.txt +3 -0
  23. aimodelshare/data_sharing/data_sharing_templates/__init__.py +1 -0
  24. aimodelshare/data_sharing/data_sharing_templates/buildspec.txt +15 -0
  25. aimodelshare/data_sharing/data_sharing_templates/codebuild_policies.txt +129 -0
  26. aimodelshare/data_sharing/data_sharing_templates/codebuild_trust_relationship.txt +12 -0
  27. aimodelshare/data_sharing/download_data.py +620 -0
  28. aimodelshare/data_sharing/share_data.py +373 -0
  29. aimodelshare/data_sharing/utils.py +8 -0
  30. aimodelshare/deploy_custom_lambda.py +246 -0
  31. aimodelshare/documentation/Makefile +20 -0
  32. aimodelshare/documentation/karma_sphinx_theme/__init__.py +28 -0
  33. aimodelshare/documentation/karma_sphinx_theme/_version.py +2 -0
  34. aimodelshare/documentation/karma_sphinx_theme/breadcrumbs.html +70 -0
  35. aimodelshare/documentation/karma_sphinx_theme/layout.html +172 -0
  36. aimodelshare/documentation/karma_sphinx_theme/search.html +50 -0
  37. aimodelshare/documentation/karma_sphinx_theme/searchbox.html +14 -0
  38. aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css +2 -0
  39. aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css.map +1 -0
  40. aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css +2751 -0
  41. aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css.map +1 -0
  42. aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css +2 -0
  43. aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css.map +1 -0
  44. aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.eot +0 -0
  45. aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.svg +32 -0
  46. aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.ttf +0 -0
  47. aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff +0 -0
  48. aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff2 +0 -0
  49. aimodelshare/documentation/karma_sphinx_theme/static/js/theme.js +68 -0
  50. aimodelshare/documentation/karma_sphinx_theme/theme.conf +9 -0
  51. aimodelshare/documentation/make.bat +35 -0
  52. aimodelshare/documentation/requirements.txt +2 -0
  53. aimodelshare/documentation/source/about.rst +18 -0
  54. aimodelshare/documentation/source/advanced_features.rst +137 -0
  55. aimodelshare/documentation/source/competition.rst +218 -0
  56. aimodelshare/documentation/source/conf.py +58 -0
  57. aimodelshare/documentation/source/create_credentials.rst +86 -0
  58. aimodelshare/documentation/source/example_notebooks.rst +132 -0
  59. aimodelshare/documentation/source/functions.rst +151 -0
  60. aimodelshare/documentation/source/gettingstarted.rst +390 -0
  61. aimodelshare/documentation/source/images/creds1.png +0 -0
  62. aimodelshare/documentation/source/images/creds2.png +0 -0
  63. aimodelshare/documentation/source/images/creds3.png +0 -0
  64. aimodelshare/documentation/source/images/creds4.png +0 -0
  65. aimodelshare/documentation/source/images/creds5.png +0 -0
  66. aimodelshare/documentation/source/images/creds_file_example.png +0 -0
  67. aimodelshare/documentation/source/images/predict_tab.png +0 -0
  68. aimodelshare/documentation/source/index.rst +110 -0
  69. aimodelshare/documentation/source/modelplayground.rst +132 -0
  70. aimodelshare/exceptions.py +11 -0
  71. aimodelshare/generatemodelapi.py +1270 -0
  72. aimodelshare/iam/codebuild_policy.txt +129 -0
  73. aimodelshare/iam/codebuild_trust_relationship.txt +12 -0
  74. aimodelshare/iam/lambda_policy.txt +15 -0
  75. aimodelshare/iam/lambda_trust_relationship.txt +12 -0
  76. aimodelshare/json_templates/__init__.py +1 -0
  77. aimodelshare/json_templates/api_json.txt +155 -0
  78. aimodelshare/json_templates/auth/policy.txt +1 -0
  79. aimodelshare/json_templates/auth/role.txt +1 -0
  80. aimodelshare/json_templates/eval/policy.txt +1 -0
  81. aimodelshare/json_templates/eval/role.txt +1 -0
  82. aimodelshare/json_templates/function/policy.txt +1 -0
  83. aimodelshare/json_templates/function/role.txt +1 -0
  84. aimodelshare/json_templates/integration_response.txt +5 -0
  85. aimodelshare/json_templates/lambda_policy_1.txt +15 -0
  86. aimodelshare/json_templates/lambda_policy_2.txt +8 -0
  87. aimodelshare/json_templates/lambda_role_1.txt +12 -0
  88. aimodelshare/json_templates/lambda_role_2.txt +16 -0
  89. aimodelshare/leaderboard.py +174 -0
  90. aimodelshare/main/1.txt +132 -0
  91. aimodelshare/main/1B.txt +112 -0
  92. aimodelshare/main/2.txt +153 -0
  93. aimodelshare/main/3.txt +134 -0
  94. aimodelshare/main/4.txt +128 -0
  95. aimodelshare/main/5.txt +109 -0
  96. aimodelshare/main/6.txt +105 -0
  97. aimodelshare/main/7.txt +144 -0
  98. aimodelshare/main/8.txt +142 -0
  99. aimodelshare/main/__init__.py +1 -0
  100. aimodelshare/main/authorization.txt +275 -0
  101. aimodelshare/main/eval_classification.txt +79 -0
  102. aimodelshare/main/eval_lambda.txt +1709 -0
  103. aimodelshare/main/eval_regression.txt +80 -0
  104. aimodelshare/main/lambda_function.txt +8 -0
  105. aimodelshare/main/nst.txt +149 -0
  106. aimodelshare/model.py +1543 -0
  107. aimodelshare/modeluser.py +215 -0
  108. aimodelshare/moral_compass/README.md +408 -0
  109. aimodelshare/moral_compass/__init__.py +65 -0
  110. aimodelshare/moral_compass/_version.py +3 -0
  111. aimodelshare/moral_compass/api_client.py +601 -0
  112. aimodelshare/moral_compass/apps/__init__.py +69 -0
  113. aimodelshare/moral_compass/apps/ai_consequences.py +540 -0
  114. aimodelshare/moral_compass/apps/bias_detective.py +714 -0
  115. aimodelshare/moral_compass/apps/ethical_revelation.py +898 -0
  116. aimodelshare/moral_compass/apps/fairness_fixer.py +889 -0
  117. aimodelshare/moral_compass/apps/judge.py +888 -0
  118. aimodelshare/moral_compass/apps/justice_equity_upgrade.py +853 -0
  119. aimodelshare/moral_compass/apps/mc_integration_helpers.py +820 -0
  120. aimodelshare/moral_compass/apps/model_building_game.py +1104 -0
  121. aimodelshare/moral_compass/apps/model_building_game_beginner.py +687 -0
  122. aimodelshare/moral_compass/apps/moral_compass_challenge.py +858 -0
  123. aimodelshare/moral_compass/apps/session_auth.py +254 -0
  124. aimodelshare/moral_compass/apps/shared_activity_styles.css +349 -0
  125. aimodelshare/moral_compass/apps/tutorial.py +481 -0
  126. aimodelshare/moral_compass/apps/what_is_ai.py +853 -0
  127. aimodelshare/moral_compass/challenge.py +365 -0
  128. aimodelshare/moral_compass/config.py +187 -0
  129. aimodelshare/placeholders/model.onnx +0 -0
  130. aimodelshare/placeholders/preprocessor.zip +0 -0
  131. aimodelshare/playground.py +1968 -0
  132. aimodelshare/postprocessormodules.py +157 -0
  133. aimodelshare/preprocessormodules.py +373 -0
  134. aimodelshare/pyspark/1.txt +195 -0
  135. aimodelshare/pyspark/1B.txt +181 -0
  136. aimodelshare/pyspark/2.txt +220 -0
  137. aimodelshare/pyspark/3.txt +204 -0
  138. aimodelshare/pyspark/4.txt +187 -0
  139. aimodelshare/pyspark/5.txt +178 -0
  140. aimodelshare/pyspark/6.txt +174 -0
  141. aimodelshare/pyspark/7.txt +211 -0
  142. aimodelshare/pyspark/8.txt +206 -0
  143. aimodelshare/pyspark/__init__.py +1 -0
  144. aimodelshare/pyspark/authorization.txt +258 -0
  145. aimodelshare/pyspark/eval_classification.txt +79 -0
  146. aimodelshare/pyspark/eval_lambda.txt +1441 -0
  147. aimodelshare/pyspark/eval_regression.txt +80 -0
  148. aimodelshare/pyspark/lambda_function.txt +8 -0
  149. aimodelshare/pyspark/nst.txt +213 -0
  150. aimodelshare/python/my_preprocessor.py +58 -0
  151. aimodelshare/readme.md +26 -0
  152. aimodelshare/reproducibility.py +181 -0
  153. aimodelshare/sam/Dockerfile.txt +8 -0
  154. aimodelshare/sam/Dockerfile_PySpark.txt +24 -0
  155. aimodelshare/sam/__init__.py +1 -0
  156. aimodelshare/sam/buildspec.txt +11 -0
  157. aimodelshare/sam/codebuild_policies.txt +129 -0
  158. aimodelshare/sam/codebuild_trust_relationship.txt +12 -0
  159. aimodelshare/sam/codepipeline_policies.txt +173 -0
  160. aimodelshare/sam/codepipeline_trust_relationship.txt +12 -0
  161. aimodelshare/sam/spark-class.txt +2 -0
  162. aimodelshare/sam/template.txt +54 -0
  163. aimodelshare/tools.py +103 -0
  164. aimodelshare/utils/__init__.py +78 -0
  165. aimodelshare/utils/optional_deps.py +38 -0
  166. aimodelshare/utils.py +57 -0
  167. aimodelshare-0.3.7.dist-info/METADATA +298 -0
  168. aimodelshare-0.3.7.dist-info/RECORD +171 -0
  169. aimodelshare-0.3.7.dist-info/WHEEL +5 -0
  170. aimodelshare-0.3.7.dist-info/licenses/LICENSE +5 -0
  171. aimodelshare-0.3.7.dist-info/top_level.txt +1 -0
aimodelshare/pyspark/eval_regression.txt ADDED
@@ -0,0 +1,80 @@
+ import boto3
+ import pandas as pd
+ import os
+ import numpy as np
+ import onnxruntime as rt
+ import json
+ import sklearn
+ from sklearn.metrics import accuracy_score
+ from sklearn.metrics import f1_score
+ from sklearn.metrics import precision_score
+ from sklearn.metrics import recall_score
+ from sklearn.metrics import roc_auc_score
+ from sklearn.metrics import mean_squared_error
+ from sklearn.metrics import r2_score
+ from sklearn.metrics import mean_absolute_error
+ from math import sqrt
+ import pickle
+ import six
+
+ def get_ytestdata(ytest_s3_filename="ytest.pkl"):
+     s3 = boto3.resource("s3")
+     bucket = s3.Bucket("$bucket_name")
+
+     with open("/tmp/ytest.pkl", "wb") as ytestfo:
+         bucket.download_fileobj("$unique_model_id/ytest.pkl", ytestfo)
+     ytestdata = pickle.load(open("/tmp/ytest.pkl", "rb" ) )
+     return ytestdata
+
+ def model_eval_metrics(y_true, y_pred,classification="TRUE"):
+     if classification=="TRUE":
+         accuracy_eval = accuracy_score(y_true, y_pred)
+         f1_score_eval = f1_score(y_true, y_pred,average="macro",zero_division=0)
+         precision_eval = precision_score(y_true, y_pred,average="macro",zero_division=0)
+         recall_eval = recall_score(y_true, y_pred,average="macro",zero_division=0)
+         mse_eval = 0
+         rmse_eval = 0
+         mae_eval = 0
+         r2_eval = 0
+         metricdata = {'accuracy': [accuracy_eval], 'f1_score': [f1_score_eval], 'precision': [precision_eval], 'recall': [recall_eval], 'mse': [mse_eval], 'rmse': [rmse_eval], 'mae': [mae_eval], 'r2': [r2_eval]}
+         finalmetricdata = pd.DataFrame.from_dict(metricdata)
+     else:
+         accuracy_eval = 0
+         f1_score_eval = 0
+         precision_eval = 0
+         recall_eval = 0
+         mse_eval = mean_squared_error(y_true, y_pred)
+         rmse_eval = sqrt(mean_squared_error(y_true, y_pred))
+         mae_eval = mean_absolute_error(y_true, y_pred)
+         r2_eval = r2_score(y_true, y_pred)
+         metricdata = {'accuracy': [accuracy_eval], 'f1_score': [f1_score_eval], 'precision': [precision_eval], 'recall': [recall_eval], 'mse': [mse_eval], 'rmse': [rmse_eval], 'mae': [mae_eval], 'r2': [r2_eval]}
+         finalmetricdata = pd.DataFrame.from_dict(metricdata)
+     return finalmetricdata.to_dict('records')[0]
+
+ ytestdata=get_ytestdata(ytest_s3_filename="ytest.pkl")
+
+ def evaluate_model(event,ytestdata):
+     body = event["body"]
+     print(body)
+     import six
+     if isinstance(event["body"], six.string_types):
+         prediction_list = json.loads(event["body"])
+         print(prediction_list)
+     else:
+         prediction_list = event["body"]
+         print(prediction_list)
+
+     result=model_eval_metrics(ytestdata,prediction_list,classification="FALSE")
+     return result
+
+ def handler(event, context):
+     result = evaluate_model(event,ytestdata)
+     return {"statusCode": 200,
+             "headers": {
+                 "Access-Control-Allow-Origin" : "*",
+                 "Access-Control-Allow-Credentials": True,
+                 "Allow" : "GET, OPTIONS, POST",
+                 "Access-Control-Allow-Methods" : "GET, OPTIONS, POST",
+                 "Access-Control-Allow-Headers" : "*"
+             },
+             "body": json.dumps(result)}
aimodelshare/pyspark/lambda_function.txt ADDED
@@ -0,0 +1,8 @@
+ import json
+ from model import handler
+
+ def lambda_handler(event, context):
+
+     print("lambda_handler from main")
+
+     return handler(event, context)
aimodelshare/pyspark/nst.txt ADDED
@@ -0,0 +1,213 @@
+ #Neural Style Transfer Prediction Runtime Code
+
+ import boto3
+ import cv2
+ import os
+ import numpy as np
+ import json
+ import onnxruntime as rt
+ import base64
+ import imghdr
+ import six
+ from functools import partial
+ import os.path
+ from os import path
+ from io import BytesIO
+ from PIL import Image
+ import pyspark
+ import pyspark.ml
+ import pandas as pd
+
+ def get_model_onnx(runtimemodel_s3_filename="runtime_model.onnx"):
+     s3 = boto3.resource('s3')
+     obj = s3.Object("$bucket_name", "$unique_model_id" +
+                     "/runtime_model.onnx")
+     model = rt.InferenceSession(obj.get()['Body'].read())
+     return model
+
+ def _get_pyspark_modules():
+     import re
+     pyspark_modules = ['ml', 'ml.feature', 'ml.classification', 'ml.clustering', 'ml.regression']
+
+     models_modules_dict = {}
+
+     for i in pyspark_modules:
+         models_list = [j for j in dir(eval('pyspark.'+i)) if callable(getattr(eval('pyspark.'+i), j))]
+         models_list = [j for j in models_list if re.match('^[A-Z]', j)]
+
+         for k in models_list:
+             models_modules_dict[k] = 'pyspark.'+i
+
+     return models_modules_dict
+
+ def pyspark_model_from_string(model_type):
+     import importlib
+
+     models_modules_dict = _get_pyspark_modules()
+     module = models_modules_dict[model_type]
+     model_class = getattr(importlib.import_module(module), model_type)
+     return model_class
+
+ def get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip"):
+     import os
+     import pickle
+     import tempfile
+     from io import BytesIO
+     from pathlib import Path
+     from zipfile import ZipFile
+     from pyspark.sql import SparkSession
+
+
+     #create temporary folder
+     temp_dir = tempfile.gettempdir()
+
+     s3 = boto3.resource("s3")
+     bucket = s3.Bucket("$bucket_name")
+
+     zip_obj = s3.Object(bucket_name="$bucket_name",
+                         key="$unique_model_id/runtime_preprocessor.zip")
+     buffer = BytesIO(zip_obj.get()["Body"].read())
+     z = ZipFile(buffer)
+     # Extract all the contents of zip file in temp directory
+     z.extractall(temp_dir)
+
+     # Then import all pkl files you want from bucket (need to generate this list from
+     # function globals
+     pickle_file_list = []
+     zip_file_list = []
+     for file in os.listdir(temp_dir):
+         if file.endswith(".pkl"):
+             pickle_file_list.append(os.path.join(temp_dir, file))
+         if file.endswith(".zip"):
+             zip_file_list.append(os.path.join(temp_dir, file))
+
+     for i in pickle_file_list:
+         objectname = str(os.path.basename(i)).replace(".pkl", "")
+         objects = { objectname: "" }
+         globals()[objectname] = pickle.load(open(str(i), "rb"))
+
+     # Need spark session and context to instantiate model object
+     # zip_file_list is only used by pyspark
+     if len(zip_file_list):
+         spark = SparkSession \
+             .builder \
+             .appName('Pyspark Model') \
+             .getOrCreate()
+
+     for i in zip_file_list:
+         objectnames = str(os.path.basename(i)).replace(".zip", "").split("__")
+         dir_path = i.replace(".zip", "")
+         Path(dir_path).mkdir(parents=True, exist_ok=True)
+
+         # Create a ZipFile Object and load module.zip in it
+         with ZipFile(i, 'r') as zipObj:
+             # Extract all the contents of zip file in current directory
+             zipObj.extractall(dir_path)
+
+         preprocessor_type = objectnames[0].split("_")[0]
+         objectname = objectnames[1]
+         preprocessor_class = pyspark_model_from_string(preprocessor_type)
+         if preprocessor_type == "PipelineModel":
+             print(preprocessor_class)
+             preprocessor_model = preprocessor_class(stages=None)
+         else:
+             preprocessor_model = preprocessor_class()
+
+         preprocessor_model = preprocessor_model.load(dir_path)
+         globals()[objectname] = preprocessor_model
+
+     # First import preprocessor function to session from preprocessor.py
+     exec(open(os.path.join(temp_dir, 'preprocessor.py')).read(),globals())
+     return preprocessor
+
+ def get_runtimedata(runtimedata_s3_filename="runtime_data.json"):
+
+     s3 = boto3.resource('s3')
+     obj = s3.Object("$bucket_name", "$unique_model_id"+"/"+runtimedata_s3_filename)
+     runtime_data = json.load(obj.get()['Body'])
+
+     return runtime_data
+
+
+
+ runtime_data=get_runtimedata(runtimedata_s3_filename="runtime_data.json")
+
+ preprocessor_type=runtime_data["runtime_preprocessor"]
+
+ runtime_model=runtime_data["runtime_model"]["name"]
+
+ model=get_model_onnx(runtimemodel_s3_filename='runtime_model.onnx')
+
+ # Load preprocessor
+
+ preprocessor=get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip")
+
+
+
+ def predict(event,model,preprocessor):
+
+     # Load base64 encoded image stored within "data" key of event dictionary
+     body = event["body"]
+
+     if isinstance(event["body"], six.string_types):
+         body = json.loads(event["body"])
+
+     content= body["content"]
+     style= body["style"]
+
+     # Extract image file extension (e.g.-jpg, png, etc.)
+
+     sample_content = base64.decodebytes(bytearray(content, "utf-8"))
+     sample_style = base64.decodebytes(bytearray(style, "utf-8"))
+
+     content_file_type=None
+     for tf in imghdr.tests:
+         image_file_type = tf(sample_content, None)
+         if image_file_type:
+             break
+     content_file_type=image_file_type
+     if(content_file_type==None):
+         print("This file is not an image, please submit an image base64 encoded image file.")
+
+     style_file_type=None
+     for tf in imghdr.tests:
+         image_file_type = tf(sample_style, None)
+         if image_file_type:
+             break
+     style_file_type=image_file_type
+     if(style_file_type==None):
+         print("This file is not an image, please submit an image base64 encoded image file.")
+
+     # Save image to local file, read into session, and preprocess image with preprocessor function
+     with open("/tmp/imagetopredict."+content_file_type, "wb") as fh:
+         fh.write(base64.b64decode(content))
+     content_image = preprocessor("/tmp/imagetopredict."+content_file_type)
+     with open("/tmp/imagetopredict."+style_file_type, "wb") as fh:
+         fh.write(base64.b64decode(style))
+     style_image = preprocessor("/tmp/imagetopredict."+style_file_type)
+
+     # Generate prediction using preprocessed input data
+     print("The model expects input shape:", model.get_inputs()[0].shape)
+     input_name = model.get_inputs()[0].name
+     res = model.run(None, {
+         "placeholder": content_image,
+         "placeholder_1": style_image
+     })
+
+     pil_img = Image.fromarray(np.uint8(res[0].squeeze(axis=0)*255))
+     buffered = BytesIO()
+     pil_img.save(buffered, format="JPEG")
+     img_str = base64.b64encode(buffered.getvalue())
+     return img_str.decode("utf-8")
+
+ def handler(event, context):
+     result = predict(event,model,preprocessor)
+     return {"statusCode": 200,
+             "headers": {
+                 "Access-Control-Allow-Origin" : "*",
+                 "Access-Control-Allow-Credentials": True,
+                 "Allow" : "GET, OPTIONS, POST",
+                 "Access-Control-Allow-Methods" : "GET, OPTIONS, POST",
+                 "Access-Control-Allow-Headers" : "*"
+             },
+             "body" : json.dumps(result)}
aimodelshare/python/my_preprocessor.py ADDED
@@ -0,0 +1,58 @@
+ import pandas as pd
+ import numpy as np
+
+ from sklearn.compose import ColumnTransformer
+ from sklearn.pipeline import Pipeline
+ from sklearn.impute import SimpleImputer
+ from sklearn.preprocessing import StandardScaler, OneHotEncoder
+ from sklearn.linear_model import LogisticRegression
+ from sklearn.model_selection import train_test_split, GridSearchCV
+
+ np.random.seed(0)
+
+ # Read data from Titanic dataset.
+ titanic_url = ('https://raw.githubusercontent.com/amueller/'
+                'scipy-2017-sklearn/091d371/notebooks/datasets/titanic3.csv')
+ data = pd.read_csv(titanic_url)
+
+ # We will train our classifier with the following features:
+ # Numeric Features:
+ # - age: float.
+ # - fare: float.
+ # Categorical Features:
+ # - embarked: categories encoded as strings {'C', 'S', 'Q'}.
+ # - sex: categories encoded as strings {'female', 'male'}.
+ # - pclass: ordinal integers {1, 2, 3}.
+
+ # We create the preprocessing pipelines for both numeric and categorical data.
+ numeric_features = ['age', 'fare']
+ numeric_transformer = Pipeline(steps=[
+     ('imputer', SimpleImputer(strategy='median')),
+     ('scaler', StandardScaler())])
+
+ categorical_features = ['embarked', 'sex', 'pclass']
+
+ # Replacing missing values with Modal value and then one-hot encoding.
+ categorical_transformer = Pipeline(steps=[
+     ('imputer', SimpleImputer(strategy='most_frequent')),
+     ('onehot', OneHotEncoder(handle_unknown='ignore'))])
+
+ # Final preprocessor object set up with ColumnTransformer...
+
+ preprocess = ColumnTransformer(
+     transformers=[
+         ('num', numeric_transformer, numeric_features),
+         ('cat', categorical_transformer, categorical_features)])
+
+
+ X = data.drop('survived', axis=1)
+ X = data.drop('name', axis=1)
+ y = data['survived']
+
+ X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
+
+ preprocess = preprocess.fit(X_train)
+ def preprocessor(data):
+     import sklearn
+     preprocessed_data=preprocess.transform(data)
+     return preprocessed_data
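This example script fits a ColumnTransformer at import time and exports a `preprocessor` function, which is the callable the runtime code loads from the uploaded preprocessor zip. A brief illustration of calling it, using the held-out split created in the same script:

```python
# Illustration only: the exported preprocessor() applies the fitted
# ColumnTransformer to new rows; here it is run on the X_test split above.
features = preprocessor(X_test)
print(features.shape)  # (n_test_rows, n_encoded_feature_columns)
```

In a deployed prediction API, this is the function that receives the raw input before it is passed to the ONNX model.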
aimodelshare/readme.md ADDED
@@ -0,0 +1,26 @@
+ <p align="center"><img width="40%" src="https://github.com/AIModelShare/aimodelshare/blob/master/docs/aimodshare_banner.jpg" /></p>
+
+ ### The mission of the AI Model Share Platform is to provide a trusted non profit repository for machine learning model prediction APIs (python library + integrated website at modelshare.org). A beta version of the platform is currently being used by Columbia University students, faculty, and staff to test and improve platform functionality.
+
+ ### In a matter of seconds, data scientists can launch a model into this infrastructure and end-users the world over will be able to engage their machine learning models.
+
+ * ***Launch machine learning models into scalable production ready prediction REST APIs using a single Python function.***
+
+ * ***Details about each model, how to use the model's API, and the model's author(s) are deployed simultaneously into a searchable website at modelshare.org.***
+
+ * ***Deployed models receive an individual Model Playground listing information about all deployed models. Each of these pages includes a fully functional prediction dashboard that allows end-users to input text, tabular, or image data and receive live predictions.***
+
+ * ***Moreover, users can build on model playgrounds by 1) creating ML model competitions, 2) uploading Jupyter notebooks to share code, 3) sharing model architectures and 4) sharing data... with all shared artifacts automatically creating a data science user portfolio.***
+
+ # Use aimodelshare Python library to deploy your model, create a new ML competition, and more.
+ * [Tutorials for deploying models](https://www.modelshare.org/search/deploy?search=ALL&problemdomain=ALL&gettingstartedguide=TRUE&pythonlibrariesused=ALL&tags=ALL&pageNum=1).
+
+ # Find model playground web-dashboards to generate predictions now.
+ * [View deployed models and generate predictions at modelshare.org](https://www.modelshare.org)
+
+ # Installation
+
+ You can then install aimodelshare from PyPi
+ ```
+ pip install aimodelshare
+ ```
aimodelshare/reproducibility.py ADDED
@@ -0,0 +1,181 @@
+ import os
+ import sys
+ import json
+ import random
+ import tempfile
+ import requests
+
+ import numpy as np
+
+ # TensorFlow is optional - only needed for reproducibility setup with TF models
+ try:
+     import tensorflow as tf
+     _TF_AVAILABLE = True
+ except ImportError:
+     _TF_AVAILABLE = False
+     tf = None
+
+ try:
+     import importlib.metadata as md
+ except ImportError: # pragma: no cover
+     import importlib_metadata as md
+
+ from aimodelshare.aws import get_s3_iam_client, run_function_on_lambda, get_aws_client
+
+ def export_reproducibility_env(seed, directory, mode="gpu"):
+     # Change the output into json.dumps
+     # Argument single seed for all inputs & mode
+     data = {
+         "global_seed_code": [
+             "os.environ['PYTHONHASHSEED'] = '{}'".format(seed),
+             "random.seed({})".format(seed),
+             "tf.random.set_seed({})".format(seed),
+             "np.random.seed({})".format(seed),
+         ]
+     }
+
+     # Ignore this part for now
+     # Local seed codes are tensorflow code that are
+     # not affected by the global seed and sometimes require us
+     # to define the seed in the function call
+     data["local_seed_code"] = [
+         "train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed={}, image_size=(img_height, img_width), batch_size=batch_size)".format(seed),
+         "val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed={}, image_size=(img_height, img_width), batch_size=batch_size)".format(seed),
+     ]
+
+     if mode == "gpu":
+         data["gpu_cpu_parallelism_ops"] = [
+             "os.environ['TF_DETERMINISTIC_OPS'] = '1'",
+             "os.environ['TF_CUDNN_DETERMINISTIC'] = '1'"
+         ]
+     elif mode == "cpu":
+         data["gpu_cpu_parallelism_ops"] = [
+             "tf.config.threading.set_inter_op_parallelism_threads(1)"
+         ]
+     else:
+         raise Exception("Error: unknown 'mode' value, expected 'gpu' or 'cpu'")
+
+     # Get installed packages using importlib.metadata
+     installed_packages_list = []
+     for dist in md.distributions():
+         name = dist.metadata.get("Name") or "unknown"
+         version = dist.version
+         installed_packages_list.append(f"{name}=={version}")
+     installed_packages_list = sorted(installed_packages_list)
+
+     data["session_runtime_info"] = {
+         "installed_packages": installed_packages_list,
+         "python_version": sys.version,
+     }
+
+     with open(os.path.join(directory, "reproducibility.json"), "w") as fp:
+         json.dump(data, fp)
+
+     return print("Your reproducibility environment is now saved to 'reproducibility.json'")
+
+ def set_reproducibility_env(reproducibility_env):
+     # Change the input into dict / json
+     for global_code in reproducibility_env["global_seed_code"]:
+         exec("%s" % (global_code))
+
+     for parallelism_ops in reproducibility_env["gpu_cpu_parallelism_ops"]:
+         exec("%s" % (parallelism_ops))
+
+ def import_reproducibility_env(reproducibility_env_file):
+     with open(reproducibility_env_file) as json_file:
+         reproducibility_env = json.load(json_file)
+     set_reproducibility_env(reproducibility_env)
+
+     print("Your reproducibility environment is successfully setup")
+
+ def import_reproducibility_env_from_competition_model(apiurl,version,submission_type):
+     # Confirm that creds are loaded, print warning if not
+     if all(["username" in os.environ,
+             "password" in os.environ]):
+         pass
+     else:
+         return print("Credentials not found. Please provide credentials with set_credentials().")
+
+     post_dict = {
+         "y_pred": [],
+         "return_eval": "False",
+         "return_y": "False",
+         "inspect_model": "False",
+         "version": "None",
+         "compare_models": "False",
+         "version_list": "None",
+         "get_leaderboard": "False",
+         "instantiate_model": "False",
+         "reproduce": "True",
+         "trained": "False",
+         "model_version": version,
+         "submission_type": submission_type
+     }
+
+     headers = { 'Content-Type':'application/json', 'authorizationToken': os.environ.get("AWS_TOKEN"),}
+
+     apiurl_eval=apiurl[:-1]+"eval"
+
+     resp = requests.post(apiurl_eval,headers=headers,data=json.dumps(post_dict))
+
+     # Check for appropriate response from Lambda.
+     try :
+         resp.raise_for_status()
+     except requests.exceptions.HTTPError :
+         raise Exception(f"Error: Received {resp.status_code} from AWS, Please check if Model Version is correct.")
+
+     # Load Dictionary
+     resp_dict = json.loads(resp.text)
+
+     # Check if key,value pair exists
+     if resp_dict['reproducibility_env'] != None:
+         set_reproducibility_env(resp_dict['reproducibility_env'])
+         # Store version of reproducibility_environment_set
+         os.environ['reproducibility_environment_version'] = str(version)
+         print("Your reproducibility environment is successfully setup. Please setup data and then call `replicate_model` ")
+     else:
+         print("Reproducibility environment is not found")
+
+
+ def import_reproducibility_env_from_model(apiurl):
+     if all(["AWS_ACCESS_KEY_ID_AIMS" in os.environ,
+             "AWS_SECRET_ACCESS_KEY_AIMS" in os.environ,
+             "AWS_REGION_AIMS" in os.environ,
+             "username" in os.environ,
+             "password" in os.environ]):
+         pass
+     else:
+         return print("'Instantiate Model' unsuccessful. Please provide credentials with set_credentials().")
+
+     aws_client = get_aws_client()
+     reproducibility_env_filename = "/runtime_reproducibility.json"
+
+     # Get bucket and model_id for user
+     response, error = run_function_on_lambda(
+         apiurl, **{"delete": "FALSE", "versionupdateget": "TRUE"}
+     )
+     if error is not None:
+         raise error
+
+     _, bucket, model_id = json.loads(response.content.decode("utf-8"))
+
+     try:
+         resp_string = aws_client["client"].get_object(
+             Bucket=bucket, Key=model_id + reproducibility_env_filename
+         )
+
+         reproducibility_env_string = resp_string['Body'].read()
+
+     except Exception as err:
+         print("This model was not deployed with reproducibility support")
+         raise err
+
+     # generate tempfile for onnx object
+     temp_dir = tempfile.gettempdir()
+     temp_path = os.path.join(temp_dir, 'temp_file_name')
+
+     # save onnx to temporary path
+     with open(temp_path, "wb") as f:
+         f.write(reproducibility_env_string)
+
+     import_reproducibility_env(temp_path)
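A hedged usage sketch for the reproducibility helpers above (the seed and paths are example values; replaying the exported seed code assumes TensorFlow is installed, since it executes tf.random.set_seed):

```python
# Example values only: export the current session's reproducibility info,
# then replay it later (or on another machine) before retraining a model.
from aimodelshare.reproducibility import (
    export_reproducibility_env,
    import_reproducibility_env,
)

export_reproducibility_env(seed=42, directory=".", mode="cpu")  # writes ./reproducibility.json
import_reproducibility_env("reproducibility.json")              # re-executes the stored seed code
```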
aimodelshare/sam/Dockerfile.txt ADDED
@@ -0,0 +1,8 @@
+ FROM public.ecr.aws/lambda/python:$python_version
+
+ COPY $directory/. ./
+ COPY $requirements_file_path ./
+
+ RUN pip install -r ./requirements.txt
+
+ CMD ["lambda_function.lambda_handler"]
aimodelshare/sam/Dockerfile_PySpark.txt ADDED
@@ -0,0 +1,24 @@
+ FROM public.ecr.aws/lambda/python:$python_version
+
+ RUN yum -y install java-1.8.0-openjdk
+
+ COPY $directory/. ./
+ COPY $requirements_file_path ./
+
+ RUN pip install -r ./requirements.txt
+
+ ENV SPARK_HOME="/var/lang/lib/python$python_version/site-packages/pyspark"
+ ENV PATH=$PATH:$SPARK_HOME/bin
+ ENV PATH=$PATH:$SPARK_HOME/sbin
+ ENV PYTHONPATH=$SPARK_HOME/python:$SPARK_HOME/python/lib/py4j-0.10.9-src.zip:$PYTHONPATH
+ ENV PATH=$SPARK_HOME/python:$PATH
+ ENV SPARK_MASTER_HOST="localhost"
+ ENV SPARK_LOCAL_IP="127.0.0.1"
+
+ ENV JAVA_HOME="/usr/lib/jvm/java-1.8.0-openjdk*/jre"
+ ENV PATH=$PATH:$JAVA_HOME/bin
+
+ COPY spark-class $SPARK_HOME/bin/
+ RUN chmod +x $SPARK_HOME/bin/spark-class
+
+ CMD ["lambda_function.lambda_handler"]
aimodelshare/sam/__init__.py ADDED
@@ -0,0 +1 @@
+
aimodelshare/sam/buildspec.txt ADDED
@@ -0,0 +1,11 @@
+ version: 0.2
+
+ phases:
+   pre_build:
+     commands:
+       - aws ecr get-login-password --region $region | docker login --username AWS --password-stdin $account_id.dkr.ecr.$region.amazonaws.com
+   build:
+     commands:
+       - sam build
+       - sam package --output-template-file packaged-template.yml --image-repository $account_id.dkr.ecr.$region.amazonaws.com/$repository_name
+       - sam deploy --template-file packaged-template.yml --stack-name $stack_name --capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM CAPABILITY_AUTO_EXPAND --region $region --image-repository $account_id.dkr.ecr.$region.amazonaws.com/$repository_name
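The SAM templates above use $-style placeholders ($python_version, $region, $account_id, and so on) that are substituted before CodeBuild runs. A hedged sketch of that substitution using Python's string.Template, which matches the $name syntax in these files; the rendering code actually used inside aimodelshare is not shown in this diff, and the values below are examples only:

```python
# Assumption: placeholders are filled with string.Template-style substitution.
from string import Template

with open("aimodelshare/sam/buildspec.txt") as f:
    rendered = Template(f.read()).safe_substitute(
        region="us-east-1",              # example value
        account_id="123456789012",       # example value
        repository_name="example-repo",  # example value
        stack_name="example-stack",      # example value
    )

with open("buildspec.yml", "w") as f:
    f.write(rendered)
```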