aimodelshare 0.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (171)
  1. aimodelshare/README.md +26 -0
  2. aimodelshare/__init__.py +100 -0
  3. aimodelshare/aimsonnx.py +2381 -0
  4. aimodelshare/api.py +836 -0
  5. aimodelshare/auth.py +163 -0
  6. aimodelshare/aws.py +511 -0
  7. aimodelshare/aws_client.py +173 -0
  8. aimodelshare/base_image.py +154 -0
  9. aimodelshare/bucketpolicy.py +106 -0
  10. aimodelshare/color_mappings/color_mapping_keras.csv +121 -0
  11. aimodelshare/color_mappings/color_mapping_pytorch.csv +117 -0
  12. aimodelshare/containerisation.py +244 -0
  13. aimodelshare/containerization.py +712 -0
  14. aimodelshare/containerization_templates/Dockerfile.txt +8 -0
  15. aimodelshare/containerization_templates/Dockerfile_PySpark.txt +23 -0
  16. aimodelshare/containerization_templates/buildspec.txt +14 -0
  17. aimodelshare/containerization_templates/lambda_function.txt +40 -0
  18. aimodelshare/custom_approach/__init__.py +1 -0
  19. aimodelshare/custom_approach/lambda_function.py +17 -0
  20. aimodelshare/custom_eval_metrics.py +103 -0
  21. aimodelshare/data_sharing/__init__.py +0 -0
  22. aimodelshare/data_sharing/data_sharing_templates/Dockerfile.txt +3 -0
  23. aimodelshare/data_sharing/data_sharing_templates/__init__.py +1 -0
  24. aimodelshare/data_sharing/data_sharing_templates/buildspec.txt +15 -0
  25. aimodelshare/data_sharing/data_sharing_templates/codebuild_policies.txt +129 -0
  26. aimodelshare/data_sharing/data_sharing_templates/codebuild_trust_relationship.txt +12 -0
  27. aimodelshare/data_sharing/download_data.py +620 -0
  28. aimodelshare/data_sharing/share_data.py +373 -0
  29. aimodelshare/data_sharing/utils.py +8 -0
  30. aimodelshare/deploy_custom_lambda.py +246 -0
  31. aimodelshare/documentation/Makefile +20 -0
  32. aimodelshare/documentation/karma_sphinx_theme/__init__.py +28 -0
  33. aimodelshare/documentation/karma_sphinx_theme/_version.py +2 -0
  34. aimodelshare/documentation/karma_sphinx_theme/breadcrumbs.html +70 -0
  35. aimodelshare/documentation/karma_sphinx_theme/layout.html +172 -0
  36. aimodelshare/documentation/karma_sphinx_theme/search.html +50 -0
  37. aimodelshare/documentation/karma_sphinx_theme/searchbox.html +14 -0
  38. aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css +2 -0
  39. aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css.map +1 -0
  40. aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css +2751 -0
  41. aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css.map +1 -0
  42. aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css +2 -0
  43. aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css.map +1 -0
  44. aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.eot +0 -0
  45. aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.svg +32 -0
  46. aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.ttf +0 -0
  47. aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff +0 -0
  48. aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff2 +0 -0
  49. aimodelshare/documentation/karma_sphinx_theme/static/js/theme.js +68 -0
  50. aimodelshare/documentation/karma_sphinx_theme/theme.conf +9 -0
  51. aimodelshare/documentation/make.bat +35 -0
  52. aimodelshare/documentation/requirements.txt +2 -0
  53. aimodelshare/documentation/source/about.rst +18 -0
  54. aimodelshare/documentation/source/advanced_features.rst +137 -0
  55. aimodelshare/documentation/source/competition.rst +218 -0
  56. aimodelshare/documentation/source/conf.py +58 -0
  57. aimodelshare/documentation/source/create_credentials.rst +86 -0
  58. aimodelshare/documentation/source/example_notebooks.rst +132 -0
  59. aimodelshare/documentation/source/functions.rst +151 -0
  60. aimodelshare/documentation/source/gettingstarted.rst +390 -0
  61. aimodelshare/documentation/source/images/creds1.png +0 -0
  62. aimodelshare/documentation/source/images/creds2.png +0 -0
  63. aimodelshare/documentation/source/images/creds3.png +0 -0
  64. aimodelshare/documentation/source/images/creds4.png +0 -0
  65. aimodelshare/documentation/source/images/creds5.png +0 -0
  66. aimodelshare/documentation/source/images/creds_file_example.png +0 -0
  67. aimodelshare/documentation/source/images/predict_tab.png +0 -0
  68. aimodelshare/documentation/source/index.rst +110 -0
  69. aimodelshare/documentation/source/modelplayground.rst +132 -0
  70. aimodelshare/exceptions.py +11 -0
  71. aimodelshare/generatemodelapi.py +1270 -0
  72. aimodelshare/iam/codebuild_policy.txt +129 -0
  73. aimodelshare/iam/codebuild_trust_relationship.txt +12 -0
  74. aimodelshare/iam/lambda_policy.txt +15 -0
  75. aimodelshare/iam/lambda_trust_relationship.txt +12 -0
  76. aimodelshare/json_templates/__init__.py +1 -0
  77. aimodelshare/json_templates/api_json.txt +155 -0
  78. aimodelshare/json_templates/auth/policy.txt +1 -0
  79. aimodelshare/json_templates/auth/role.txt +1 -0
  80. aimodelshare/json_templates/eval/policy.txt +1 -0
  81. aimodelshare/json_templates/eval/role.txt +1 -0
  82. aimodelshare/json_templates/function/policy.txt +1 -0
  83. aimodelshare/json_templates/function/role.txt +1 -0
  84. aimodelshare/json_templates/integration_response.txt +5 -0
  85. aimodelshare/json_templates/lambda_policy_1.txt +15 -0
  86. aimodelshare/json_templates/lambda_policy_2.txt +8 -0
  87. aimodelshare/json_templates/lambda_role_1.txt +12 -0
  88. aimodelshare/json_templates/lambda_role_2.txt +16 -0
  89. aimodelshare/leaderboard.py +174 -0
  90. aimodelshare/main/1.txt +132 -0
  91. aimodelshare/main/1B.txt +112 -0
  92. aimodelshare/main/2.txt +153 -0
  93. aimodelshare/main/3.txt +134 -0
  94. aimodelshare/main/4.txt +128 -0
  95. aimodelshare/main/5.txt +109 -0
  96. aimodelshare/main/6.txt +105 -0
  97. aimodelshare/main/7.txt +144 -0
  98. aimodelshare/main/8.txt +142 -0
  99. aimodelshare/main/__init__.py +1 -0
  100. aimodelshare/main/authorization.txt +275 -0
  101. aimodelshare/main/eval_classification.txt +79 -0
  102. aimodelshare/main/eval_lambda.txt +1709 -0
  103. aimodelshare/main/eval_regression.txt +80 -0
  104. aimodelshare/main/lambda_function.txt +8 -0
  105. aimodelshare/main/nst.txt +149 -0
  106. aimodelshare/model.py +1543 -0
  107. aimodelshare/modeluser.py +215 -0
  108. aimodelshare/moral_compass/README.md +408 -0
  109. aimodelshare/moral_compass/__init__.py +65 -0
  110. aimodelshare/moral_compass/_version.py +3 -0
  111. aimodelshare/moral_compass/api_client.py +601 -0
  112. aimodelshare/moral_compass/apps/__init__.py +69 -0
  113. aimodelshare/moral_compass/apps/ai_consequences.py +540 -0
  114. aimodelshare/moral_compass/apps/bias_detective.py +714 -0
  115. aimodelshare/moral_compass/apps/ethical_revelation.py +898 -0
  116. aimodelshare/moral_compass/apps/fairness_fixer.py +889 -0
  117. aimodelshare/moral_compass/apps/judge.py +888 -0
  118. aimodelshare/moral_compass/apps/justice_equity_upgrade.py +853 -0
  119. aimodelshare/moral_compass/apps/mc_integration_helpers.py +820 -0
  120. aimodelshare/moral_compass/apps/model_building_game.py +1104 -0
  121. aimodelshare/moral_compass/apps/model_building_game_beginner.py +687 -0
  122. aimodelshare/moral_compass/apps/moral_compass_challenge.py +858 -0
  123. aimodelshare/moral_compass/apps/session_auth.py +254 -0
  124. aimodelshare/moral_compass/apps/shared_activity_styles.css +349 -0
  125. aimodelshare/moral_compass/apps/tutorial.py +481 -0
  126. aimodelshare/moral_compass/apps/what_is_ai.py +853 -0
  127. aimodelshare/moral_compass/challenge.py +365 -0
  128. aimodelshare/moral_compass/config.py +187 -0
  129. aimodelshare/placeholders/model.onnx +0 -0
  130. aimodelshare/placeholders/preprocessor.zip +0 -0
  131. aimodelshare/playground.py +1968 -0
  132. aimodelshare/postprocessormodules.py +157 -0
  133. aimodelshare/preprocessormodules.py +373 -0
  134. aimodelshare/pyspark/1.txt +195 -0
  135. aimodelshare/pyspark/1B.txt +181 -0
  136. aimodelshare/pyspark/2.txt +220 -0
  137. aimodelshare/pyspark/3.txt +204 -0
  138. aimodelshare/pyspark/4.txt +187 -0
  139. aimodelshare/pyspark/5.txt +178 -0
  140. aimodelshare/pyspark/6.txt +174 -0
  141. aimodelshare/pyspark/7.txt +211 -0
  142. aimodelshare/pyspark/8.txt +206 -0
  143. aimodelshare/pyspark/__init__.py +1 -0
  144. aimodelshare/pyspark/authorization.txt +258 -0
  145. aimodelshare/pyspark/eval_classification.txt +79 -0
  146. aimodelshare/pyspark/eval_lambda.txt +1441 -0
  147. aimodelshare/pyspark/eval_regression.txt +80 -0
  148. aimodelshare/pyspark/lambda_function.txt +8 -0
  149. aimodelshare/pyspark/nst.txt +213 -0
  150. aimodelshare/python/my_preprocessor.py +58 -0
  151. aimodelshare/readme.md +26 -0
  152. aimodelshare/reproducibility.py +181 -0
  153. aimodelshare/sam/Dockerfile.txt +8 -0
  154. aimodelshare/sam/Dockerfile_PySpark.txt +24 -0
  155. aimodelshare/sam/__init__.py +1 -0
  156. aimodelshare/sam/buildspec.txt +11 -0
  157. aimodelshare/sam/codebuild_policies.txt +129 -0
  158. aimodelshare/sam/codebuild_trust_relationship.txt +12 -0
  159. aimodelshare/sam/codepipeline_policies.txt +173 -0
  160. aimodelshare/sam/codepipeline_trust_relationship.txt +12 -0
  161. aimodelshare/sam/spark-class.txt +2 -0
  162. aimodelshare/sam/template.txt +54 -0
  163. aimodelshare/tools.py +103 -0
  164. aimodelshare/utils/__init__.py +78 -0
  165. aimodelshare/utils/optional_deps.py +38 -0
  166. aimodelshare/utils.py +57 -0
  167. aimodelshare-0.3.7.dist-info/METADATA +298 -0
  168. aimodelshare-0.3.7.dist-info/RECORD +171 -0
  169. aimodelshare-0.3.7.dist-info/WHEEL +5 -0
  170. aimodelshare-0.3.7.dist-info/licenses/LICENSE +5 -0
  171. aimodelshare-0.3.7.dist-info/top_level.txt +1 -0
aimodelshare/pyspark/eval_lambda.txt
@@ -0,0 +1,1441 @@
+ import boto3
+ import pandas as pd
+ import os
+ import json
+ import pickle
+ import six
+ import onnx
+ import argparse
+ import logging
+ from botocore.exceptions import ClientError
+ import requests
+ import jwt
+ import sys
+
+ logger = logging.getLogger(__name__)
+
+
+ # from aimslambda import analyze_ytest, evaluate_model, inspect_model, compare_models
+ # from s3connect import get_ytestdata, get_onnx_mem, get_onnx_temp
+
+
+ ####################################################################
+ ########################### main handler ###########################
+
+ def handler(event, context):
+
+     body = event["body"]
+     if isinstance(body, six.string_types):
+         body = json.loads(body)
+
+     for key, value in body.items():
+         if value == "None":
+             body[key] = None
+
+     if body.get("exampledata", "ALL") in ("True", "TRUE"):
+
+         exampledata = get_exampledata(example_data_filename="exampledata.json")
+
+         exdata_dict = {"statusCode": 200,
+                        "headers": {
+                            "Access-Control-Allow-Origin": "*",
+                            "Access-Control-Allow-Credentials": True,
+                            "Allow": "GET, OPTIONS, POST",
+                            "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                            "Access-Control-Allow-Headers": "*"},
+                        "body": json.dumps(exampledata)
+                        }
+         return exdata_dict
+
+     idtoken = event['requestContext']['authorizer']['principalId']
+     decoded = jwt.decode(idtoken, options={"verify_signature": False})  # read claims only; the API Gateway authorizer already validated the token
+     email = decoded['email']
+     print(email)
+     authorized_competitionusers = get_authorizedcompetitionuserdata(example_data_filename="competitionuserdata.json")
+     authorized_emails = authorized_competitionusers.get("emaillist", [])
+     public = authorized_competitionusers.get("public", "FALSE")
+
+     # TODO: check email against s3 list and resolve public to true or false from the same file,
+     # or check if email is on the list and, if not, assume a public project.
+     if any([email in authorized_emails, public == "TRUE"]):
+
+         if body.get("return_eval", "ALL") == "True":
+             idempotentmodel_version = json.loads(event['requestContext']['authorizer']['uniquemodversion'])
+
+             ytestdata = get_ytestdata(ytest_s3_filename="ytest.pkl")
+
+             eval_result = evaluate_model(body, ytestdata)
+             bucket = "$bucket_name"
+             model_id = "$unique_model_id"
+
+             s3_client = boto3.client("s3")
+             model_files, err = _get_file_list(s3_client, bucket, model_id)
+             print(model_files)
+
+             mversions = []
+             musers = []
+             mtimestamp = []
+             print(idempotentmodel_version)
+             for i in idempotentmodel_version:
+                 idemresult = i.split("||||")
+                 mversions.append(idemresult[0])
+                 musers.append(idemresult[1])
+                 mtimestamp.append(idemresult[-1])  # timestamp is the last "||||"-delimited field
+             print(mversions)
+             mversions = [int(i) for i in mversions]
+
+             newleaderboarddata = []
+             for i in model_files:
+                 if i.find("mastertable_v") > 0:
+                     newleaderboarddata.append(i)
+
+             try:
+                 leaderboard = get_leaderboard("$task_type")  # task_type variable used here
+                 currentversions = leaderboard['version']
+             except:
+                 currentversions = []
+             print("current versions:")
+             print(list(currentversions))
+             allversions = [sub.split('_v')[1].split('.')[0] for sub in newleaderboarddata]
+             print("Named versions in csv files:")
+             allversions = [int(i) for i in allversions]
+             missingincurrent_leaderboard = list(set(allversions) - set(currentversions))
+
+             missingingmodelversions = list(set(mversions) - set(currentversions))
+
+             print("missing model versions for idemp check")
+             print(missingingmodelversions)
+
+             if len(missingingmodelversions) > 1:
+                 idempotentmodel_version = min(missingingmodelversions)
+             elif len(missingingmodelversions) == 1:
+                 idempotentmodel_version = missingingmodelversions[0]
+             else:
+                 idempotentmodel_version = 0
+
+             finalfiles = []
+
+             if "model_eval_data_mastertable.csv" not in model_files:
+                 finalfiles.append("model_eval_data_mastertable.csv")
+                 finalfiles.append("onnx_model_mostrecent.onnx")
+                 finalfiles.append("onnx_model_v1.onnx")
+                 finalfiles.append("predictionmodel_1.onnx")
+                 finalfiles.append("preprocessor_v1.zip")
+                 finalfiles.append("reproducibility_v1.json")
+                 finalfiles.append("model_metadata_v1.json")
+             else:
+                 finalfiles.append("model_eval_data_mastertable_v" + str(idempotentmodel_version) + ".csv")
+                 finalfiles.append("onnx_model_mostrecent.onnx")
+                 finalfiles.append("onnx_model_v" + str(idempotentmodel_version) + ".onnx")
+                 finalfiles.append("preprocessor_v" + str(idempotentmodel_version) + ".zip")
+                 finalfiles.append("reproducibility_v" + str(idempotentmodel_version) + ".json")
+                 finalfiles.append("model_metadata_v" + str(idempotentmodel_version) + ".json")
+
+             finalfiles.append("inspect_pd_" + str(idempotentmodel_version) + ".json")
+             finalfiles.append("model_graph_" + str(idempotentmodel_version) + ".json")
+             print("finalfiles:" + str(finalfiles))
+
+             # TODO: Change method params with string template params in eval_lambda / also need to change bucket and prefix for file lists
+             expires_in = 6000
+
+             getdict = {}
+             putdict = {}
+
+             print("idempotentmodel_version: " + str(idempotentmodel_version))
+             finalfilesget = list(finalfiles)   # independent copies, so rewriting the GET list
+             finalfilespost = list(finalfiles)  # below cannot mutate the POST list
+
+             for i in finalfilespost:
+                 putresult = create_presigned_post(bucket, model_id + "/" + i, expiration=expires_in)
+                 putdict.update({str(i): str(putresult)})
+
+             print(finalfilespost)
+             for i in finalfilesget:
+                 if i.find("mastertable") > 0:
+                     indexvalue = finalfilesget.index(i)
+                     finalfilesget[indexvalue] = "model_eval_data_mastertable.csv"
+
+             print(finalfilesget)
+
+             for i in finalfilesget:
+                 method_parameters = {'Bucket': bucket, 'Key': model_id + "/" + i}  # repeat for all necessary keys and return dict with nec. end user artifacts.
+                 getresult = generate_presigned_url(s3_client, 'get_object', method_parameters, expires_in)
+                 getdict.update({str(i): str(getresult)})
+
+             print(finalfilesget)
+
+             eval_dict = {"statusCode": 200,
+                          "headers": {
+                              "Access-Control-Allow-Origin": "*",
+                              "Access-Control-Allow-Credentials": True,
+                              "Allow": "GET, OPTIONS, POST",
+                              "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                              "Access-Control-Allow-Headers": "*"},
+                          "body": json.dumps({"eval": eval_result, "get": getdict, "put": putdict, "idempotentmodel_version": idempotentmodel_version})
+                          }
+             return eval_dict
+
+         if body.get("return_y", "ALL") == "True":
+
+             ytestdata = get_ytestdata(ytest_s3_filename="ytest.pkl")
+             y_stats = analyze_ytest(ytestdata)
+
+             ytest_dict = {"statusCode": 200,
+                           "headers": {
+                               "Access-Control-Allow-Origin": "*",
+                               "Access-Control-Allow-Credentials": True,
+                               "Allow": "GET, OPTIONS, POST",
+                               "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                               "Access-Control-Allow-Headers": "*"},
+                           "body": json.dumps(y_stats)
+                           }
+             return ytest_dict
+
+         if body.get("inspect_model", "ALL") == "True":
+
+             version = body["version"]
+
+             inspect_pd = inspect_model(version)
+
+             inspect_dict = {"statusCode": 200,
+                             "headers": {
+                                 "Access-Control-Allow-Origin": "*",
+                                 "Access-Control-Allow-Credentials": True,
+                                 "Allow": "GET, OPTIONS, POST",
+                                 "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                                 "Access-Control-Allow-Headers": "*"},
+                             "body": json.dumps(inspect_pd.to_dict())
+                             }
+             return inspect_dict
+
+         if body.get("compare_models", "ALL") == "True":
+
+             version_list = body["version_list"]
+             verbose = body.get("verbose", 1)
+             naming_convention = body.get("naming_convention", None)
+
+             comp_dict_out = compare_models(version_list, verbose=verbose, naming_convention=naming_convention)
+
+             compare_dict = {"statusCode": 200,
+                             "headers": {
+                                 "Access-Control-Allow-Origin": "*",
+                                 "Access-Control-Allow-Credentials": True,
+                                 "Allow": "GET, OPTIONS, POST",
+                                 "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                                 "Access-Control-Allow-Headers": "*"},
+                             "body": json.dumps(comp_dict_out)
+                             }
+             return compare_dict
+
+         if body.get("get_leaderboard", "ALL") == "True":
+
+             verbose = body["verbose"]
+             columns = body["columns"]
+
+             leaderboard = get_leaderboard("$task_type", verbose, columns)
+
+             leaderboard_dict = {"statusCode": 200,
+                                 "headers": {
+                                     "Access-Control-Allow-Origin": "*",
+                                     "Access-Control-Allow-Credentials": True,
+                                     "Allow": "GET, OPTIONS, POST",
+                                     "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                                     "Access-Control-Allow-Headers": "*"},
+                                 "body": json.dumps(leaderboard.to_dict())
+                                 }
+             return leaderboard_dict
+
+         if body.get("instantiate_model", "ALL") == "True":
+             version = body["model_version"]
+             reproduce = body["reproduce"]
+             trained = body["trained"]
+
+             reproducibility_env_json = None
+             model_weight_url = None
+             model_metadata_json = get_model_metadata(version)
+
+             # the model version is found (users didn't only submit predictions for this version)
+             if model_metadata_json:
+                 if reproduce:
+                     reproducibility_env_json = get_reproducibility_env(version=version)
+                 elif trained:
+                     # Get the presigned url.
+                     s3_client = boto3.client("s3")
+
+                     bucket = "$bucket_name"
+                     model_id = "$unique_model_id"
+                     onnx_model_name = "onnx_model_v{}.onnx".format(version)
+
+                     method_parameters = {
+                         "Bucket": bucket,
+                         "Key": model_id + "/" + onnx_model_name,
+                     }
+
+                     expires_in = 900  # 15 mins
+
+                     presigned_url = generate_presigned_url(s3_client, "get_object", method_parameters, expires_in)
+                     model_weight_url = str(presigned_url)
+                     print("Presigned url: {}".format(str(presigned_url)))
+
+             data = {
+                 "model_weight_url": model_weight_url,
+                 "model_metadata": model_metadata_json,
+                 "reproducibility_env": reproducibility_env_json
+             }
+             model_dict = {
+                 "statusCode": 200,
+                 "headers": {
+                     "Access-Control-Allow-Origin": "*",
+                     "Access-Control-Allow-Credentials": True,
+                     "Allow": "GET, OPTIONS, POST",
+                     "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                     "Access-Control-Allow-Headers": "*"
+                 },
+                 "body": json.dumps(data)
+             }
+             return model_dict
+
+         if body.get("leaderboard", "ALL") == "TRUE":
+
+             leaderboard = get_leaderboard("$task_type")
+
+             leaderboard_dict = {"statusCode": 200,
+                                 "headers": {
+                                     "Access-Control-Allow-Origin": "*",
+                                     "Access-Control-Allow-Credentials": True,
+                                     "Allow": "GET, OPTIONS, POST",
+                                     "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                                     "Access-Control-Allow-Headers": "*"},
+                                 "body": leaderboard.to_json(orient="table")
+                                 }
+             return leaderboard_dict
+     else:
+
+         if body.get("leaderboard", "ALL") == "TRUE":
+
+             leaderboard = get_leaderboard("$task_type")
+
+             leaderboard_dict = {"statusCode": 200,
+                                 "headers": {
+                                     "Access-Control-Allow-Origin": "*",
+                                     "Access-Control-Allow-Credentials": True,
+                                     "Allow": "GET, OPTIONS, POST",
+                                     "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                                     "Access-Control-Allow-Headers": "*"},
+                                 "body": leaderboard.to_json(orient="table")
+                                 }
+             return leaderboard_dict
+
+         if body.get("compare_models", "ALL") == "True":
+
+             version_list = body["version_list"]
+             verbose = body.get("verbose", 1)
+             naming_convention = body.get("naming_convention", None)
+
+             comp_dict_out = compare_models(version_list, verbose=verbose, naming_convention=naming_convention)
+
+             compare_dict = {"statusCode": 200,
+                             "headers": {
+                                 "Access-Control-Allow-Origin": "*",
+                                 "Access-Control-Allow-Credentials": True,
+                                 "Allow": "GET, OPTIONS, POST",
+                                 "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                                 "Access-Control-Allow-Headers": "*"},
+                             "body": json.dumps(comp_dict_out)
+                             }
+             return compare_dict
+
+         else:
+             unauthorized_user_dict = {"statusCode": 200,
+                                       "headers": {
+                                           "Access-Control-Allow-Origin": "*",
+                                           "Access-Control-Allow-Credentials": True,
+                                           "Allow": "GET, OPTIONS, POST",
+                                           "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                                           "Access-Control-Allow-Headers": "*"},
+                                       "body": json.dumps(["Unauthorized user: You do not have access to submit models to, or request data from, this competition."])
+                                       }
+             return unauthorized_user_dict
+
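Taken together, the branches above define a simple dispatch contract: the POST body is a JSON object of string flags ("return_eval", "return_y", "inspect_model", "compare_models", "get_leaderboard", "instantiate_model", "leaderboard"), and the API Gateway authorizer supplies the caller's JWT plus the idempotent model version. A minimal local smoke test, assuming the $-template variables have been substituted and the competition's S3 objects are reachable (the token below is a hypothetical self-signed stand-in, accepted only because the handler skips signature verification):

    import json
    import jwt

    token = jwt.encode({"email": "user@example.com"}, "ignored", algorithm="HS256")
    event = {
        "body": json.dumps({"return_y": "True"}),
        "requestContext": {"authorizer": {
            "principalId": token,
            # version||||user||||timestamp, as produced by the authorizer
            "uniquemodversion": json.dumps(["1||||user@example.com||||1616161616"]),
        }},
    }
    response = handler(event, context=None)
    print(response["statusCode"], json.loads(response["body"]))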
+ ####################################################################
+ ######################### aimsonnx lambda ##########################
+
+
+ import sklearn
+ from sklearn.metrics import accuracy_score
+ from sklearn.metrics import f1_score
+ from sklearn.metrics import precision_score
+ from sklearn.metrics import recall_score
+ from sklearn.metrics import roc_auc_score
+ from sklearn.metrics import mean_squared_error
+ from sklearn.metrics import r2_score
+ from sklearn.metrics import mean_absolute_error
+ from collections import Counter
+ from math import sqrt
+ import json
+ import pandas as pd
+ import numpy as np
+ import ast
+ import six
+ import gc
+ import importlib
+ import botocore
+
+ # from s3connect import get_onnx_mem
+
+ def analyze_ytest(ytestdata, task_type="$task_type"):
+
+     if task_type == "classification":
+
+         class_labels = list(set(ytestdata))
+         class_balance = Counter(ytestdata)
+         label_dtypes = Counter([str(type(i)) for i in ytestdata])
+
+         y_stats = {"ytest_example": ytestdata[0:5],
+                    "y_length": len(ytestdata),
+                    "class_labels": class_labels,
+                    "class_balance": class_balance,
+                    "label_dtypes": label_dtypes}
+
+     else:
+         y_mean = np.mean(ytestdata)
+         y_min = np.min(ytestdata)
+         y_max = np.max(ytestdata)
+         y_sd = np.std(ytestdata)
+
+         y_stats = {"ytest_example": ytestdata[0:5],
+                    "y_length": len(ytestdata),
+                    "y_mean": y_mean,
+                    "y_min": y_min,
+                    "y_max": y_max,
+                    "y_sd": y_sd}
+
+     return y_stats
+
+
+ def model_eval_metrics(y_true, y_pred, task_type="$task_type"):
+
+     if task_type == "classification":
+         try:
+             accuracy_eval = accuracy_score(y_true, y_pred)
+         except:
+             accuracy_eval = None
+
+         try:
+             f1_score_eval = f1_score(y_true, y_pred, average="macro", zero_division=0)
+         except:
+             f1_score_eval = None
+
+         try:
+             precision_eval = precision_score(y_true, y_pred, average="macro", zero_division=0)
+         except:
+             precision_eval = None
+
+         try:
+             recall_eval = recall_score(y_true, y_pred, average="macro", zero_division=0)
+         except:
+             recall_eval = None
+
+         mse_eval = None
+         rmse_eval = None
+         mae_eval = None
+         r2_eval = None
+
+         metricdata = {'accuracy': [accuracy_eval], 'f1_score': [f1_score_eval], 'precision': [precision_eval], 'recall': [recall_eval], 'mse': [mse_eval], 'rmse': [rmse_eval], 'mae': [mae_eval], 'r2': [r2_eval]}
+
+     else:
+
+         try:
+             mse_eval = mean_squared_error(y_true, y_pred)
+         except:
+             mse_eval = None
+
+         try:
+             rmse_eval = sqrt(mean_squared_error(y_true, y_pred))
+         except:
+             rmse_eval = None
+
+         try:
+             mae_eval = mean_absolute_error(y_true, y_pred)
+         except:
+             mae_eval = None
+
+         try:
+             r2_eval = r2_score(y_true, y_pred)
+         except:
+             r2_eval = None
+
+         accuracy_eval = None
+         f1_score_eval = None
+         precision_eval = None
+         recall_eval = None
+
+         metricdata = {'accuracy': [accuracy_eval], 'f1_score': [f1_score_eval], 'precision': [precision_eval], 'recall': [recall_eval], 'mse': [mse_eval], 'rmse': [rmse_eval], 'mae': [mae_eval], 'r2': [r2_eval]}
+
+     s3 = boto3.resource("s3")
+     bucket = s3.Bucket("$bucket_name")
+
+     metrics_files = bucket.objects.filter(Prefix="$unique_model_id" + "/metrics_")
+
+     if metrics_files:
+
+         for i in metrics_files:
+
+             file = i.key.split('/')[-1]
+
+             eval_metric = get_eval_metric(eval_metric_s3_filename=file)
+             custom_eval = eval_metric(y_true, y_pred)
+
+             if isinstance(custom_eval, dict):
+
+                 for k in custom_eval.keys():
+                     metricdata[k] = [custom_eval[k]]
+
+             else:
+
+                 metricdata[file.replace("metrics_", "").replace(".zip", "")] = [custom_eval]
+
+     finalmetricdata = pd.DataFrame.from_dict(metricdata)
+
+     return finalmetricdata.to_dict('records')[0]
+
+
+ def evaluate_model(body, ytestdata):
+
+     if isinstance(body["y_pred"], six.string_types):
+         prediction_list = json.loads(body["y_pred"])
+     else:
+         prediction_list = body["y_pred"]
+
+     result = model_eval_metrics(ytestdata, prediction_list, task_type="$task_type")
+     return result
+
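`evaluate_model` pins down the submission contract: `body["y_pred"]` may arrive either as a JSON-encoded string or as a plain list, and the result is a single record whose keys mirror the leaderboard's metric columns, plus any custom metrics. A quick self-contained check of the sklearn calls used above (no S3 access, custom metrics left out):

    from sklearn.metrics import accuracy_score, f1_score

    y_true = ["cat", "dog", "dog", "cat"]
    y_pred = ["cat", "dog", "cat", "cat"]
    print(accuracy_score(y_true, y_pred))                              # 0.75
    print(f1_score(y_true, y_pred, average="macro", zero_division=0))  # ~0.733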
+
+ def inspect_model(version):
+
+     s3 = boto3.resource('s3')
+     obj = s3.Object("$bucket_name", "$unique_model_id/" + "inspect_pd_" + str(version) + ".json")
+     data = obj.get()['Body'].read()
+     model_dict = json.loads(data)
+
+     ml_framework = model_dict.get(str(version))['ml_framework']
+     model_type = model_dict.get(str(version))['model_type']
+     inspect_pd = pd.DataFrame(model_dict.get(str(version))['model_dict'])
+
+     return inspect_pd
+
+
+ def _model_summary(meta_dict, from_onnx=False):
+     '''Creates model summary table from model metadata dict.'''
+
+     assert(isinstance(meta_dict, dict)), \
+         "Please pass valid metadata dict."
+
+     assert('model_architecture' in meta_dict.keys()), \
+         "Please make sure model architecture data is included."
+
+     if from_onnx == True:
+         architecture = meta_dict['metadata_onnx']["model_architecture"]
+     else:
+         architecture = meta_dict["model_architecture"]
+
+     model_summary = pd.DataFrame({'Layer': architecture['layers_sequence'],
+                                   # 'Activation': architecture['activations_sequence'],
+                                   'Shape': architecture['layers_shapes'],
+                                   'Params': architecture['layers_n_params']})
+
+     return model_summary
+
+
+ def _get_metadata(onnx_model):
+     '''Fetches previously extracted model metadata from ONNX object
+     and returns model metadata dict.'''
+
+     # double check this
+     # assert(isinstance(onnx_model, onnx.onnx_ml_pb2.ModelProto)), \
+     #     "Please pass an onnx model object."
+
+     try:
+         onnx_meta = onnx_model.metadata_props
+
+         onnx_meta_dict = {'model_metadata': ''}
+
+         for i in onnx_meta:
+             onnx_meta_dict[i.key] = i.value
+
+         onnx_meta_dict = ast.literal_eval(onnx_meta_dict['model_metadata'])
+
+         # if onnx_meta_dict['model_config'] != None and \
+         #         onnx_meta_dict['ml_framework'] != 'pytorch':
+         #     onnx_meta_dict['model_config'] = ast.literal_eval(onnx_meta_dict['model_config'])
+
+         if onnx_meta_dict['model_architecture'] is not None:
+             onnx_meta_dict['model_architecture'] = ast.literal_eval(onnx_meta_dict['model_architecture'])
+
+         if onnx_meta_dict['metadata_onnx'] is not None:
+             onnx_meta_dict['metadata_onnx'] = ast.literal_eval(onnx_meta_dict['metadata_onnx'])
+
+         # onnx_meta_dict['model_image'] = onnx_to_image(onnx_model)  # didn't want to include image dependencies in lambda
+
+     except Exception as e:
+
+         print(e)
+
+         onnx_meta_dict = ast.literal_eval(onnx_meta_dict)
+
+     return onnx_meta_dict
+
+
+ def compare_models(version_list, by_model_type=None, best_model=None, verbose=1, naming_convention=None):
+
+     ml_framework_list = []
+     model_type_list = []
+     model_dict_list = []
+     model_dict = {}
+
+     for i in version_list:
+
+         s3 = boto3.resource('s3')
+         obj = s3.Object("$bucket_name", "$unique_model_id/" + "inspect_pd_" + str(i) + ".json")
+         data = obj.get()['Body'].read()
+         model_dict_temp = json.loads(data)
+
+         ml_framework_list.append(model_dict_temp[str(i)]['ml_framework'])
+         model_type_list.append(model_dict_temp[str(i)]['model_type'])
+         model_dict_list.append(model_dict_temp[str(i)]['model_dict'])
+
+         model_dict[str(i)] = model_dict_temp[str(i)]
+
+     comp_dict_out = {}
+     comp_pd_nn = pd.DataFrame()
+
+     for i, j in zip(version_list, ml_framework_list):
+
+         if j == "sklearn" or j == "pyspark":
+
+             temp_pd = pd.DataFrame(model_dict[str(i)]['model_dict'])
+             temp_pd.columns = ['param_name', 'default_value', "model_version_" + str(i)]
+
+             if model_dict[str(i)]['model_type'] in comp_dict_out.keys():
+
+                 comp_pd = pd.read_json(comp_dict_out[model_dict[str(i)]['model_type']])
+                 comp_pd = comp_pd.merge(temp_pd.drop('default_value', axis=1), on='param_name')
+
+                 comp_dict_out[model_dict[str(i)]['model_type']] = comp_pd.to_json()
+
+             else:
+                 comp_dict_out[model_dict[str(i)]['model_type']] = temp_pd.to_json()
+
+         elif j == "keras" or j == 'pytorch':
+
+             temp_pd_nn = pd.DataFrame(model_dict[str(i)]['model_dict'])
+
+             temp_pd_nn.iloc[:, 2] = temp_pd_nn.iloc[:, 2].astype(str)
+
+             if verbose == 0:
+                 temp_pd_nn = temp_pd_nn[['Layer']]
+             elif verbose == 1:
+                 temp_pd_nn = temp_pd_nn[['Layer', 'Shape', 'Params']]
+             elif verbose == 2:
+                 temp_pd_nn = temp_pd_nn[['Name', 'Layer', 'Shape', 'Params', 'Connect']]
+             elif verbose == 3:
+                 temp_pd_nn = temp_pd_nn[['Name', 'Layer', 'Shape', 'Params', 'Connect', 'Activation']]
+
+             if naming_convention == 'pytorch':
+                 temp_pd_nn['Layer'] = rename_layers(temp_pd_nn['Layer'], direction="keras_to_torch", activation=False)
+
+             if naming_convention == 'keras':
+                 temp_pd_nn['Layer'] = rename_layers(temp_pd_nn['Layer'], direction="torch_to_keras", activation=False)
+
+             temp_pd_nn = temp_pd_nn.add_prefix('Model_' + str(i) + '_')
+
+             comp_pd_nn = pd.concat([comp_pd_nn, temp_pd_nn], axis=1)
+
+             comp_dict_out["nn"] = comp_pd_nn.to_json()
+
+         elif j == "undefined":
+
+             comp_dict_out["undefined_" + str(i)] = pd.DataFrame({'param_name': [], 'default_value': [], 'model_version_' + str(i): []}).to_json()
+
+     return comp_dict_out
+
+
+ def model_from_string(model_type):
+     models_modules_dict = {'ABCMeta': 'sklearn.naive_bayes',
+                            'ARDRegression': 'sklearn.linear_model',
+                            'AdaBoostClassifier': 'sklearn.ensemble',
+                            'AdaBoostRegressor': 'sklearn.ensemble',
+                            'BaggingClassifier': 'sklearn.ensemble',
+                            'BaggingRegressor': 'sklearn.ensemble',
+                            'BallTree': 'sklearn.neighbors',
+                            'BaseDecisionTree': 'sklearn.tree',
+                            'BaseEnsemble': 'sklearn.ensemble',
+                            'BaseEstimator': 'sklearn.naive_bayes',
+                            'BayesianGaussianMixture': 'sklearn.mixture',
+                            'BayesianRidge': 'sklearn.linear_model',
+                            'BernoulliNB': 'sklearn.naive_bayes',
+                            'BernoulliRBM': 'sklearn.neural_network',
+                            'CategoricalNB': 'sklearn.naive_bayes',
+                            'ClassifierMixin': 'sklearn.naive_bayes',
+                            'ComplementNB': 'sklearn.naive_bayes',
+                            'DecisionTreeClassifier': 'sklearn.tree',
+                            'DecisionTreeRegressor': 'sklearn.tree',
+                            'DistanceMetric': 'sklearn.neighbors',
+                            'ElasticNet': 'sklearn.linear_model',
+                            'ElasticNetCV': 'sklearn.linear_model',
+                            'ExtraTreeClassifier': 'sklearn.tree',
+                            'ExtraTreeRegressor': 'sklearn.tree',
+                            'ExtraTreesClassifier': 'sklearn.ensemble',
+                            'ExtraTreesRegressor': 'sklearn.ensemble',
+                            'GammaRegressor': 'sklearn.linear_model',
+                            'GaussianMixture': 'sklearn.mixture',
+                            'GaussianNB': 'sklearn.naive_bayes',
+                            'GaussianProcessClassifier': 'sklearn.gaussian_process',
+                            'GaussianProcessRegressor': 'sklearn.gaussian_process',
+                            'GradientBoostingClassifier': 'sklearn.ensemble',
+                            'GradientBoostingRegressor': 'sklearn.ensemble',
+                            'Hinge': 'sklearn.linear_model',
+                            'Huber': 'sklearn.linear_model',
+                            'HuberRegressor': 'sklearn.linear_model',
+                            'IsolationForest': 'sklearn.ensemble',
+                            'IsotonicRegression': 'sklearn.isotonic',
+                            'KDTree': 'sklearn.neighbors',
+                            'KNeighborsClassifier': 'sklearn.neighbors',
+                            'KNeighborsRegressor': 'sklearn.neighbors',
+                            'KNeighborsTransformer': 'sklearn.neighbors',
+                            'KernelDensity': 'sklearn.neighbors',
+                            'LabelBinarizer': 'sklearn.naive_bayes',
+                            'Lars': 'sklearn.linear_model',
+                            'LarsCV': 'sklearn.linear_model',
+                            'Lasso': 'sklearn.linear_model',
+                            'LassoCV': 'sklearn.linear_model',
+                            'LassoLars': 'sklearn.linear_model',
+                            'LassoLarsCV': 'sklearn.linear_model',
+                            'LassoLarsIC': 'sklearn.linear_model',
+                            'LinearRegression': 'sklearn.linear_model',
+                            'LinearSVC': 'sklearn.svm',
+                            'LinearSVR': 'sklearn.svm',
+                            'LocalOutlierFactor': 'sklearn.neighbors',
+                            'Log': 'sklearn.linear_model',
+                            'LogisticRegression': 'sklearn.linear_model',
+                            'LogisticRegressionCV': 'sklearn.linear_model',
+                            'MLPClassifier': 'sklearn.neural_network',
+                            'MLPRegressor': 'sklearn.neural_network',
+                            'MetaEstimatorMixin': 'sklearn.multiclass',
+                            'ModifiedHuber': 'sklearn.linear_model',
+                            'MultiOutputMixin': 'sklearn.multiclass',
+                            'MultiTaskElasticNet': 'sklearn.linear_model',
+                            'MultiTaskElasticNetCV': 'sklearn.linear_model',
+                            'MultiTaskLasso': 'sklearn.linear_model',
+                            'MultiTaskLassoCV': 'sklearn.linear_model',
+                            'MultinomialNB': 'sklearn.naive_bayes',
+                            'NearestCentroid': 'sklearn.neighbors',
+                            'NearestNeighbors': 'sklearn.neighbors',
+                            'NeighborhoodComponentsAnalysis': 'sklearn.neighbors',
+                            'NotFittedError': 'sklearn.multiclass',
+                            'NuSVC': 'sklearn.svm',
+                            'NuSVR': 'sklearn.svm',
+                            'OneClassSVM': 'sklearn.svm',
+                            'OneVsOneClassifier': 'sklearn.multiclass',
+                            'OneVsRestClassifier': 'sklearn.multiclass',
+                            'OrthogonalMatchingPursuit': 'sklearn.linear_model',
+                            'OrthogonalMatchingPursuitCV': 'sklearn.linear_model',
+                            'OutputCodeClassifier': 'sklearn.multiclass',
+                            'Parallel': 'sklearn.multiclass',
+                            'PassiveAggressiveClassifier': 'sklearn.linear_model',
+                            'PassiveAggressiveRegressor': 'sklearn.linear_model',
+                            'Perceptron': 'sklearn.linear_model',
+                            'PoissonRegressor': 'sklearn.linear_model',
+                            'RANSACRegressor': 'sklearn.linear_model',
+                            'RadiusNeighborsClassifier': 'sklearn.neighbors',
+                            'RadiusNeighborsRegressor': 'sklearn.neighbors',
+                            'RadiusNeighborsTransformer': 'sklearn.neighbors',
+                            'RandomForestClassifier': 'sklearn.ensemble',
+                            'RandomForestRegressor': 'sklearn.ensemble',
+                            'RandomTreesEmbedding': 'sklearn.ensemble',
+                            'RegressorMixin': 'sklearn.isotonic',
+                            'Ridge': 'sklearn.linear_model',
+                            'RidgeCV': 'sklearn.linear_model',
+                            'RidgeClassifier': 'sklearn.linear_model',
+                            'RidgeClassifierCV': 'sklearn.linear_model',
+                            'SGDClassifier': 'sklearn.linear_model',
+                            'SGDRegressor': 'sklearn.linear_model',
+                            'SVC': 'sklearn.svm',
+                            'SVR': 'sklearn.svm',
+                            'SquaredLoss': 'sklearn.linear_model',
+                            'StackingClassifier': 'sklearn.ensemble',
+                            'StackingRegressor': 'sklearn.ensemble',
+                            'TheilSenRegressor': 'sklearn.linear_model',
+                            'TransformerMixin': 'sklearn.isotonic',
+                            'TweedieRegressor': 'sklearn.linear_model',
+                            'VotingClassifier': 'sklearn.ensemble',
+                            'VotingRegressor': 'sklearn.ensemble'}
+
+     module = models_modules_dict[model_type]
+     model_class = getattr(importlib.import_module(module), model_type)
+     return model_class
+
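`model_from_string` maps a class name recorded in model metadata back to the sklearn class itself, so an estimator can be re-instantiated without shipping pickles:

    model_class = model_from_string('LogisticRegression')  # sklearn.linear_model.LogisticRegression
    model = model_class(max_iter=200)
    print(type(model).__name__)  # LogisticRegression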
+ def get_leaderboard(task_type="$task_type", verbose=3, columns=None):
+     bucket = "$bucket_name"
+     model_id = "$unique_model_id"
+
+     s3_client = boto3.client("s3")
+     model_files, err = _get_file_list(s3_client, bucket, model_id)
+     print(model_files)
+
+     newleaderboarddata = []
+     for i in model_files:
+         if i.find("mastertable_v") > 0:
+             newleaderboarddata.append(i)
+
+     s3 = boto3.resource("s3")
+     bucketres = s3.Bucket("$bucket_name")
+     with open("/tmp/" + "model_eval_data_mastertable.csv", "wb") as lbfo:
+         bucketres.download_fileobj("$unique_model_id/" + "model_eval_data_mastertable.csv", lbfo)
+
+     leaderboard = pd.read_csv("/tmp/" + "model_eval_data_mastertable.csv", sep="\t")
+     currentversions = leaderboard['version']
+     print("current versions:")
+     print(list(currentversions))
+     allversions = [sub.split('_v')[1].split('.')[0] for sub in newleaderboarddata]
+     print("Named versions in csv files:")
+     allversions = [int(i) for i in allversions]
+     missingincurrent_leaderboard = list(set(allversions) - set(currentversions))
+     print(missingincurrent_leaderboard)
+
+     # TODO: check if items in leaderboard, if so, then do following
+     if len(missingincurrent_leaderboard) > 0:
+         for i in missingincurrent_leaderboard:
+             with open("/tmp/" + "model_eval_data_mastertable_v" + str(i) + ".csv", "wb") as lbfo:
+                 bucketres.download_fileobj("$unique_model_id/" + "model_eval_data_mastertable_v" + str(i) + ".csv", lbfo)
+             newleaderboard = pd.read_csv("/tmp/" + "model_eval_data_mastertable_v" + str(i) + ".csv", sep="\t")
+             newleaderboard.drop(newleaderboard.filter(regex="Unname"), axis=1, inplace=True)
+
+             leaderboard = pd.concat([leaderboard, newleaderboard]).drop_duplicates()  # DataFrame.append was removed in pandas 2.0
+
+         leaderboard.drop(leaderboard.filter(regex="Unname"), axis=1, inplace=True)
+         # save new leaderboard here
+         leaderboard.to_csv("/tmp/" + "model_eval_data_mastertable.csv", sep="\t", index=False)
+         s3_client.upload_file("/tmp/" + "model_eval_data_mastertable.csv", bucket, model_id + "/model_eval_data_mastertable.csv")
+
+     else:
+         pass
+     s3 = boto3.resource("s3")
+     bucket = s3.Bucket("$bucket_name")
+     with open("/tmp/" + "model_eval_data_mastertable.csv", "wb") as lbfo:
+         bucket.download_fileobj("$unique_model_id/" + "model_eval_data_mastertable.csv", lbfo)
+     leaderboard = pd.read_csv("/tmp/" + "model_eval_data_mastertable.csv", sep="\t")
+
+     clf = ["accuracy", "f1_score", "precision", "recall"]
+     reg = ['mse', 'rmse', 'mae', 'r2']
+     other = ['timestamp']
+
+     if columns:
+         leaderboard = leaderboard.filter(clf + reg + columns + other)
+
+     if task_type == "classification":
+         leaderboard_eval_metrics = leaderboard[clf]
+     else:
+         leaderboard_eval_metrics = leaderboard[reg]
+
+     leaderboard_model_meta = leaderboard.drop(clf + reg, axis=1).replace(0, np.nan).dropna(axis=1, how="all")
+
+     leaderboard = pd.concat([leaderboard_eval_metrics, leaderboard_model_meta], axis=1, ignore_index=False)
+
+     if verbose == 1:
+         leaderboard = leaderboard.filter(regex=("^(?!.*(_layers|_act))"))
+     elif verbose == 2:
+         leaderboard = leaderboard.filter(regex=("^(?!.*_act)"))
+
+     if task_type == "classification":
+         sort_cols = ["accuracy", "f1_score", "precision", "recall"]
+         # leaderboard = leaderboard.drop(columns=['mse', 'rmse', 'mae', 'r2'])
+     else:
+         sort_cols = ["-mae", "r2"]
+
+     ranks = []
+     for col in sort_cols:
+         ascending = False
+         if col[0] == "-":
+             col = col[1:]
+             ascending = True
+
+         ranks.append(leaderboard[col].rank(method="dense", ascending=ascending))
+
+     ranks = np.mean(ranks, axis=0)
+     order = np.argsort(ranks)
+
+     leaderboard = leaderboard.loc[order].reset_index().drop("index", axis=1).drop_duplicates(subset=['version', 'username'], keep='last')
+     leaderboard.drop(leaderboard.filter(regex="Unname"), axis=1, inplace=True)
+
+     leaderboard['username'] = leaderboard.pop("username")
+     leaderboard['timestamp'] = leaderboard.pop("timestamp")
+     leaderboard['version'] = leaderboard.pop("version")
+
+     return leaderboard
+
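The sorting convention above is worth spelling out: each metric in `sort_cols` is dense-ranked, a leading "-" marks a lower-is-better column (e.g. "-mae"), and rows are ordered by the mean of those ranks. A toy illustration of the same logic using only pandas and numpy:

    import numpy as np
    import pandas as pd

    lb = pd.DataFrame({"mae": [2.0, 1.0, 3.0], "r2": [0.8, 0.9, 0.1]})
    ranks = [lb["mae"].rank(method="dense", ascending=True),    # "-mae": lower wins
             lb["r2"].rank(method="dense", ascending=False)]    # "r2": higher wins
    order = np.argsort(np.mean(ranks, axis=0))
    print(lb.loc[order])  # the row with mae=1.0, r2=0.9 comes first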
+
+
+ def layer_mapping(direction='torch_to_keras', activation=False):
+
+     torch_keras = {'AdaptiveAvgPool1d': 'AvgPool1D',
+                    'AdaptiveAvgPool2d': 'AvgPool2D',
+                    'AdaptiveAvgPool3d': 'AvgPool3D',
+                    'AdaptiveMaxPool1d': 'MaxPool1D',
+                    'AdaptiveMaxPool2d': 'MaxPool2D',
+                    'AdaptiveMaxPool3d': 'MaxPool3D',
+                    'AlphaDropout': None,
+                    'AvgPool1d': 'AvgPool1D',
+                    'AvgPool2d': 'AvgPool2D',
+                    'AvgPool3d': 'AvgPool3D',
+                    'BatchNorm1d': 'BatchNormalization',
+                    'BatchNorm2d': 'BatchNormalization',
+                    'BatchNorm3d': 'BatchNormalization',
+                    'Bilinear': None,
+                    'ConstantPad1d': None,
+                    'ConstantPad2d': None,
+                    'ConstantPad3d': None,
+                    'Container': None,
+                    'Conv1d': 'Conv1D',
+                    'Conv2d': 'Conv2D',
+                    'Conv3d': 'Conv3D',
+                    'ConvTranspose1d': 'Conv1DTranspose',
+                    'ConvTranspose2d': 'Conv2DTranspose',
+                    'ConvTranspose3d': 'Conv3DTranspose',
+                    'CosineSimilarity': None,
+                    'CrossMapLRN2d': None,
+                    'DataParallel': None,
+                    'Dropout': 'Dropout',
+                    'Dropout2d': 'Dropout',
+                    'Dropout3d': 'Dropout',
+                    'Embedding': 'Embedding',
+                    'EmbeddingBag': 'Embedding',
+                    'FeatureAlphaDropout': None,
+                    'Flatten': 'Flatten',
+                    'Fold': None,
+                    'FractionalMaxPool2d': 'MaxPool2D',
+                    'FractionalMaxPool3d': 'MaxPool3D',
+                    'GRU': 'GRU',
+                    'GRUCell': 'GRUCell',
+                    'GroupNorm': None,
+                    'Identity': None,
+                    'InstanceNorm1d': None,
+                    'InstanceNorm2d': None,
+                    'InstanceNorm3d': None,
+                    'LPPool1d': None,
+                    'LPPool2d': None,
+                    'LSTM': 'LSTM',
+                    'LSTMCell': 'LSTMCell',
+                    'LayerNorm': None,
+                    'Linear': 'Dense',
+                    'LocalResponseNorm': None,
+                    'MaxPool1d': 'MaxPool1D',
+                    'MaxPool2d': 'MaxPool2D',
+                    'MaxPool3d': 'MaxPool3D',
+                    'MaxUnpool1d': None,
+                    'MaxUnpool2d': None,
+                    'MaxUnpool3d': None,
+                    'Module': None,
+                    'ModuleDict': None,
+                    'ModuleList': None,
+                    'PairwiseDistance': None,
+                    'Parameter': None,
+                    'ParameterDict': None,
+                    'ParameterList': None,
+                    'PixelShuffle': None,
+                    'RNN': 'RNN',
+                    'RNNBase': None,
+                    'RNNCell': None,
+                    'RNNCellBase': None,
+                    'ReflectionPad1d': None,
+                    'ReflectionPad2d': None,
+                    'ReplicationPad1d': None,
+                    'ReplicationPad2d': None,
+                    'ReplicationPad3d': None,
+                    'Sequential': None,
+                    'SyncBatchNorm': None,
+                    'Transformer': None,
+                    'TransformerDecoder': None,
+                    'TransformerDecoderLayer': None,
+                    'TransformerEncoder': None,
+                    'TransformerEncoderLayer': None,
+                    'Unfold': None,
+                    'Upsample': 'UpSampling1D',
+                    'UpsamplingBilinear2d': 'UpSampling2D',
+                    'UpsamplingNearest2d': 'UpSampling2D',
+                    'ZeroPad2d': 'ZeroPadding2D'}
+
+     keras_torch = {'AbstractRNNCell': None,
+                    'Activation': None,
+                    'ActivityRegularization': None,
+                    'Add': None,
+                    'AdditiveAttention': None,
+                    'AlphaDropout': None,
+                    'Attention': None,
+                    'Average': None,
+                    'AveragePooling1D': 'AvgPool1d',
+                    'AveragePooling2D': 'AvgPool2d',
+                    'AveragePooling3D': 'AvgPool3d',
+                    'AvgPool1D': 'AvgPool1d',
+                    'AvgPool2D': 'AvgPool2d',
+                    'AvgPool3D': 'AvgPool3d',
+                    'BatchNormalization': None,
+                    'Bidirectional': None,
+                    'Concatenate': None,
+                    'Conv1D': 'Conv1d',
+                    'Conv1DTranspose': 'ConvTranspose1d',
+                    'Conv2D': 'Conv2d',
+                    'Conv2DTranspose': 'ConvTranspose2d',
+                    'Conv3D': 'Conv3d',
+                    'Conv3DTranspose': 'ConvTranspose3d',
+                    'ConvLSTM2D': None,
+                    'Convolution1D': None,
+                    'Convolution1DTranspose': None,
+                    'Convolution2D': None,
+                    'Convolution2DTranspose': None,
+                    'Convolution3D': None,
+                    'Convolution3DTranspose': None,
+                    'Cropping1D': None,
+                    'Cropping2D': None,
+                    'Cropping3D': None,
+                    'Dense': 'Linear',
+                    'DenseFeatures': None,
+                    'DepthwiseConv2D': None,
+                    'Dot': None,
+                    'Dropout': 'Dropout',
+                    'Embedding': 'Embedding',
+                    'Flatten': 'Flatten',
+                    'GRU': 'GRU',
+                    'GRUCell': 'GRUCell',
+                    'GaussianDropout': None,
+                    'GaussianNoise': None,
+                    'GlobalAveragePooling1D': None,
+                    'GlobalAveragePooling2D': None,
+                    'GlobalAveragePooling3D': None,
+                    'GlobalAvgPool1D': None,
+                    'GlobalAvgPool2D': None,
+                    'GlobalAvgPool3D': None,
+                    'GlobalMaxPool1D': None,
+                    'GlobalMaxPool2D': None,
+                    'GlobalMaxPool3D': None,
+                    'GlobalMaxPooling1D': None,
+                    'GlobalMaxPooling2D': None,
+                    'GlobalMaxPooling3D': None,
+                    'Input': None,
+                    'InputLayer': None,
+                    'InputSpec': None,
+                    'LSTM': 'LSTM',
+                    'LSTMCell': 'LSTMCell',
+                    'Lambda': None,
+                    'Layer': None,
+                    'LayerNormalization': None,
+                    'LocallyConnected1D': None,
+                    'LocallyConnected2D': None,
+                    'Masking': None,
+                    'MaxPool1D': 'MaxPool1d',
+                    'MaxPool2D': 'MaxPool2d',
+                    'MaxPool3D': 'MaxPool3d',
+                    'MaxPooling1D': 'MaxPool1d',
+                    'MaxPooling2D': 'MaxPool2d',
+                    'MaxPooling3D': 'MaxPool3d',
+                    'Maximum': None,
+                    'Minimum': None,
+                    'MultiHeadAttention': None,
+                    'Multiply': None,
+                    'Permute': None,
+                    'RNN': 'RNN',
+                    'RepeatVector': None,
+                    'Reshape': None,
+                    'SeparableConv1D': None,
+                    'SeparableConv2D': None,
+                    'SeparableConvolution1D': None,
+                    'SeparableConvolution2D': None,
+                    'SimpleRNN': None,
+                    'SimpleRNNCell': None,
+                    'SpatialDropout1D': None,
+                    'SpatialDropout2D': None,
+                    'SpatialDropout3D': None,
+                    'StackedRNNCells': None,
+                    'Subtract': None,
+                    'TimeDistributed': None,
+                    'UpSampling1D': 'Upsample',
+                    'UpSampling2D': None,
+                    'UpSampling3D': None,
+                    'Wrapper': None,
+                    'ZeroPadding1D': None,
+                    'ZeroPadding2D': 'ZeroPad2d',
+                    'ZeroPadding3D': None}
+
+     torch_keras_act = {
+         'AdaptiveLogSoftmaxWithLoss': None,
+         'CELU': None,
+         'ELU': 'elu',
+         'GELU': 'gelu',
+         'GLU': None,
+         'Hardshrink': None,
+         'Hardsigmoid': 'hard_sigmoid',
+         'Hardswish': None,
+         'Hardtanh': None,
+         'LeakyReLU': 'LeakyReLU',
+         'LogSigmoid': None,
+         'LogSoftmax': None,
+         'Mish': None,
+         'MultiheadAttention': None,
+         'PReLU': 'PReLU',
+         'RReLU': None,
+         'ReLU': 'relu',
+         'ReLU6': 'relu',
+         'SELU': 'selu',
+         'SiLU': 'swish',
+         'Sigmoid': 'sigmoid',
+         'Softmax': 'softmax',
+         'Softmax2d': None,
+         'Softmin': None,
+         'Softplus': 'softplus',
+         'Softshrink': None,
+         'Softsign': 'softsign',
+         'Tanh': 'tanh',
+         'Tanhshrink': None,
+         'Threshold': None}
+
+     keras_torch_act = {
+         'ELU': 'ELU',
+         'LeakyReLU': 'LeakyReLU',
+         'PReLU': 'PReLU',
+         'ReLU': 'ReLU',
+         'Softmax': 'Softmax',
+         'ThresholdedReLU': None,
+         'elu': 'ELU',
+         'exponential': None,
+         'gelu': 'GELU',
+         'hard_sigmoid': 'Hardsigmoid',
+         'relu': 'ReLU',
+         'selu': 'SELU',
+         'serialize': None,
+         'sigmoid': 'Sigmoid',
+         'softmax': 'Softmax',
+         'softplus': 'Softplus',
+         'softsign': 'Softsign',
+         'swish': 'SiLU',
+         'tanh': 'Tanh'}
+
+     if direction == 'torch_to_keras' and activation:
+
+         return torch_keras_act
+
+     elif direction == 'keras_to_torch' and activation:
+
+         return keras_torch_act
+
+     elif direction == 'torch_to_keras':
+
+         return torch_keras
+
+     elif direction == 'keras_to_torch':
+
+         return keras_torch
+
+
+ def rename_layers(in_layers, direction="torch_to_keras", activation=False):
+
+     mapping_dict = layer_mapping(direction=direction, activation=activation)
+
+     out_layers = []
+
+     for i in in_layers:
+
+         layer_name_temp = mapping_dict.get(i, None)
+
+         if layer_name_temp is None:
+             out_layers.append(i)
+         else:
+             out_layers.append(layer_name_temp)
+
+     return out_layers
+
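`rename_layers` falls back to the original name whenever a layer has no counterpart in the tables above, so mixed or unknown architectures degrade gracefully:

    print(rename_layers(['Conv2d', 'Linear', 'Flatten'], direction='torch_to_keras'))
    # ['Conv2D', 'Dense', 'Flatten']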
+
+
+ ####################################################################
+ ############################ S3 connect ############################
+
+
+ import boto3
+ import pandas as pd
+ import os
+ import json
+ import pickle
+ import six
+ import onnx
+ import logging
+ from botocore.exceptions import ClientError
+
+ def get_exampledata(example_data_filename="exampledata.json"):
+     s3 = boto3.resource("s3")
+     bucket = s3.Bucket("$bucket_name")
+
+     with open("/tmp/exampledata.json", "wb") as exampledatapath:
+         bucket.download_fileobj("$unique_model_id/exampledata.json", exampledatapath)
+     exampledatajson = json.load(open("/tmp/exampledata.json", "rb"))
+     return exampledatajson
+
+ def get_ytestdata(ytest_s3_filename="ytest.pkl"):
+
+     s3 = boto3.resource("s3")
+     bucket = s3.Bucket("$bucket_name")
+
+     with open("/tmp/ytest.pkl", "wb") as ytestfo:
+         bucket.download_fileobj("$unique_model_id/ytest.pkl", ytestfo)
+     ytestdata = pickle.load(open("/tmp/ytest.pkl", "rb"))
+     return ytestdata
+
+
+ def get_onnx_temp(version):
+
+     onnx_model_name = "onnx_model_v{version}.onnx".format(version=version)
+     s3 = boto3.resource("s3")
+     bucket = s3.Bucket("$bucket_name")
+     with open("/tmp/" + onnx_model_name, "wb") as onnxfo:
+         bucket.download_fileobj("$unique_model_id/" + onnx_model_name, onnxfo)
+     onnx_model = onnx.load("/tmp/" + onnx_model_name)
+     return onnx_model
+
+ def get_onnx_string(version):
+
+     onnx_model_name = "onnx_model_v{version}.onnx".format(version=version)
+     s3 = boto3.resource('s3')
+     obj = s3.Object("$bucket_name", "$unique_model_id/" + onnx_model_name)
+     onnx_string = obj.get()['Body'].read()
+
+     return onnx_string
+
+ def get_model_metadata(version):
+     s3 = boto3.resource("s3")
+     bucket = s3.Bucket("$bucket_name")
+
+     try:
+         if version is None:
+             with open("/tmp/metadata.json", "wb") as temp_path:
+                 bucket.download_fileobj("$unique_model_id/runtime_metadata.json", temp_path)
+         else:
+             with open("/tmp/metadata.json", "wb") as temp_path:
+                 bucket.download_fileobj("$unique_model_id/model_metadata_v{}.json".format(version), temp_path)
+
+         model_metadata_json = json.load(open("/tmp/metadata.json", "rb"))
+     except botocore.exceptions.ClientError as e:
+         if e.response['Error']['Code'] == "404":
+             print("The object does not exist.")
+             return None
+         else:
+             raise
+     return model_metadata_json
+
+ def get_onnx_mem(version):
+
+     onnx_string = get_onnx_string(version)
+     onnx_model = onnx.load_from_string(onnx_string)
+
+     return onnx_model
+
+
+ def upload_file(file_name, bucket, object_name=None):
+     """Upload a file to an S3 bucket
+
+     :param file_name: File to upload
+     :param bucket: Bucket to upload to
+     :param object_name: S3 object name. If not specified then file_name is used
+     :return: True if file was uploaded, else False
+     """
+
+     # If S3 object_name was not specified, use file_name
+     if object_name is None:
+         object_name = file_name
+
+     # Upload the file
+     s3_client = boto3.client('s3')
+     try:
+         response = s3_client.upload_file(file_name, bucket, object_name)
+     except ClientError as e:
+         logging.error(e)
+         return False
+     return True
+
+
+ # Objective: for public competitions, allow all end users to submit to the competition as long as they
+ # have an aimodelshare username and password (no AWS key / password necessary).
+
+ # TODOs: Use the example starter code below, plus any code necessary to get the model version (i.e. leaderboard data),
+ # to allow model submitters to upload a model onnx file + preprocessor (and eventually + post_processor.zip).
+ # The crucial thing is that all file uploads, and any necessary downloads, only let end users upload/download
+ # files with the specific new name we desire.
+
+ def _get_file_list(client, bucket, keysubfolderid):
+     # Reading file list {{{
+     try:
+         objects = client.list_objects(Bucket=bucket, Prefix=keysubfolderid)
+     except Exception as err:
+         return None, err
+
+     file_list = []
+     if "Contents" in objects:
+         for key in objects["Contents"]:
+             file_list.append(key["Key"].split("/")[1])
+     # }}}
+
+     return file_list, None
+
+
+ # STARTER CODE EXPLAINED: Starter code returns a url to download (get_object) or upload (put_object) a single file;
+ # the process is repeated for as many files as we allow uploads and downloads for.
+
+ def generate_presigned_url(s3_client, client_method, method_parameters, expires_in):
+     """
+     Generate a presigned Amazon S3 URL that can be used to perform an action.
+
+     :param s3_client: A Boto3 Amazon S3 client.
+     :param client_method: The name of the client method that the URL performs.
+     :param method_parameters: The parameters of the specified client method.
+     :param expires_in: The number of seconds the presigned URL is valid for.
+     :return: The presigned URL.
+     """
+     try:
+         url = s3_client.generate_presigned_url(
+             ClientMethod=client_method,
+             Params=method_parameters,
+             ExpiresIn=expires_in
+         )
+         logger.info("Got presigned URL: %s", url)
+     except ClientError:
+         logger.exception(
+             "Couldn't get a presigned URL for client method '%s'.", client_method)
+         raise
+     return url
+
+
+ def create_presigned_post(bucket_name, object_name,
+                           fields=None, conditions=None, expiration=600):
+     """Generate a presigned URL S3 POST request to upload a file
+
+     :param bucket_name: string
+     :param object_name: string
+     :param fields: Dictionary of prefilled form fields
+     :param conditions: List of conditions to include in the policy
+     :param expiration: Time in seconds for the presigned URL to remain valid
+     :return: Dictionary with the following keys:
+         url: URL to post to
+         fields: Dictionary of form fields and values to submit with the POST
+     :return: None if error.
+     """
+
+     # Generate a presigned S3 POST URL
+     s3_client = boto3.client('s3')
+     try:
+         response = s3_client.generate_presigned_post(bucket_name,
+                                                      object_name,
+                                                      Fields=fields,
+                                                      Conditions=conditions,
+                                                      ExpiresIn=expiration)
+     except ClientError as e:
+         logging.error(e)
+         return None
+
+     # The response contains the presigned URL and required fields
+     return response
+
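The dictionary returned by `create_presigned_post` is meant to be consumed client-side with an ordinary multipart POST; a sketch using `requests`, with a placeholder bucket and key:

    import requests

    post = create_presigned_post("my-bucket", "model-id/onnx_model_v2.onnx")
    if post is not None:
        with open("model.onnx", "rb") as f:
            r = requests.post(post["url"], data=post["fields"],
                              files={"file": ("onnx_model_v2.onnx", f)})
        print(r.status_code)  # S3 answers 204 on success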
+ def get_authorizedcompetitionuserdata(example_data_filename="competitionuserdata.json"):
+     s3 = boto3.resource("s3")
+     bucket = s3.Bucket("$bucket_name")
+
+     with open("/tmp/competitionuserdata.json", "wb") as exampledatapath:
+         bucket.download_fileobj("$unique_model_id/competitionuserdata.json", exampledatapath)
+     competitionuserdatajson = json.load(open("/tmp/competitionuserdata.json", "rb"))
+     return competitionuserdatajson
+
+ def get_reproducibility_env(version=None):
+     s3 = boto3.resource("s3")
+     bucket = s3.Bucket("$bucket_name")
+
+     try:
+         if version is None:
+             with open("/tmp/reproducibility.json", "wb") as temp_path:
+                 bucket.download_fileobj("$unique_model_id/runtime_reproducibility.json", temp_path)
+         else:
+             with open("/tmp/reproducibility.json", "wb") as temp_path:
+                 bucket.download_fileobj("$unique_model_id/reproducibility_v{}.json".format(version), temp_path)
+
+         reproducibility_env_json = json.load(open("/tmp/reproducibility.json", "rb"))
+     except botocore.exceptions.ClientError as e:
+         if e.response['Error']['Code'] == "404":
+             print("The object does not exist.")
+             return None
+         else:
+             raise
+     return reproducibility_env_json
+
+ def get_eval_metric(eval_metric_s3_filename):
+
+     import pickle
+     from zipfile import ZipFile
+     from io import BytesIO
+     import os
+
+     s3 = boto3.resource("s3")
+     bucket = s3.Bucket("$bucket_name")
+
+     zip_obj = s3.Object(bucket_name="$bucket_name",
+                         key="$unique_model_id/" + eval_metric_s3_filename)
+
+     buffer = BytesIO(zip_obj.get()["Body"].read())
+
+     z = ZipFile(buffer)
+     # Extract all the contents of the zip file into /tmp
+     z.extractall("/tmp/")
+
+     metric_py = eval_metric_s3_filename.split('.')[-2] + '.py'
+
+     folderpath = os.path.dirname(os.path.abspath("/tmp/" + metric_py))
+     file_name = os.path.basename("/tmp/" + metric_py)
+
+     pickle_file_list = []
+     for file in os.listdir(folderpath):
+         if file.endswith(".pkl"):
+             pickle_file_list.append(os.path.join(folderpath, file))
+
+     # Load any bundled pickle files into module globals so the metric code can reference them by name
+     for i in pickle_file_list:
+         objectname = str(os.path.basename(i)).replace(".pkl", "")
+         globals()[objectname] = pickle.load(open(str(i), "rb"))
+
+     metric_py = metric_py.replace("metrics_", "")
+
+     exec(open(os.path.join(folderpath, metric_py)).read(), globals())
+
+     print(globals()['custom_eval_metric'])
+     eval_metric = globals()['custom_eval_metric']
+
+     return eval_metric
+ return eval_metric