aimodelshare 0.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aimodelshare/README.md +26 -0
- aimodelshare/__init__.py +100 -0
- aimodelshare/aimsonnx.py +2381 -0
- aimodelshare/api.py +836 -0
- aimodelshare/auth.py +163 -0
- aimodelshare/aws.py +511 -0
- aimodelshare/aws_client.py +173 -0
- aimodelshare/base_image.py +154 -0
- aimodelshare/bucketpolicy.py +106 -0
- aimodelshare/color_mappings/color_mapping_keras.csv +121 -0
- aimodelshare/color_mappings/color_mapping_pytorch.csv +117 -0
- aimodelshare/containerisation.py +244 -0
- aimodelshare/containerization.py +712 -0
- aimodelshare/containerization_templates/Dockerfile.txt +8 -0
- aimodelshare/containerization_templates/Dockerfile_PySpark.txt +23 -0
- aimodelshare/containerization_templates/buildspec.txt +14 -0
- aimodelshare/containerization_templates/lambda_function.txt +40 -0
- aimodelshare/custom_approach/__init__.py +1 -0
- aimodelshare/custom_approach/lambda_function.py +17 -0
- aimodelshare/custom_eval_metrics.py +103 -0
- aimodelshare/data_sharing/__init__.py +0 -0
- aimodelshare/data_sharing/data_sharing_templates/Dockerfile.txt +3 -0
- aimodelshare/data_sharing/data_sharing_templates/__init__.py +1 -0
- aimodelshare/data_sharing/data_sharing_templates/buildspec.txt +15 -0
- aimodelshare/data_sharing/data_sharing_templates/codebuild_policies.txt +129 -0
- aimodelshare/data_sharing/data_sharing_templates/codebuild_trust_relationship.txt +12 -0
- aimodelshare/data_sharing/download_data.py +620 -0
- aimodelshare/data_sharing/share_data.py +373 -0
- aimodelshare/data_sharing/utils.py +8 -0
- aimodelshare/deploy_custom_lambda.py +246 -0
- aimodelshare/documentation/Makefile +20 -0
- aimodelshare/documentation/karma_sphinx_theme/__init__.py +28 -0
- aimodelshare/documentation/karma_sphinx_theme/_version.py +2 -0
- aimodelshare/documentation/karma_sphinx_theme/breadcrumbs.html +70 -0
- aimodelshare/documentation/karma_sphinx_theme/layout.html +172 -0
- aimodelshare/documentation/karma_sphinx_theme/search.html +50 -0
- aimodelshare/documentation/karma_sphinx_theme/searchbox.html +14 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css +2 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css +2751 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css +2 -0
- aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css.map +1 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.eot +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.svg +32 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.ttf +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff2 +0 -0
- aimodelshare/documentation/karma_sphinx_theme/static/js/theme.js +68 -0
- aimodelshare/documentation/karma_sphinx_theme/theme.conf +9 -0
- aimodelshare/documentation/make.bat +35 -0
- aimodelshare/documentation/requirements.txt +2 -0
- aimodelshare/documentation/source/about.rst +18 -0
- aimodelshare/documentation/source/advanced_features.rst +137 -0
- aimodelshare/documentation/source/competition.rst +218 -0
- aimodelshare/documentation/source/conf.py +58 -0
- aimodelshare/documentation/source/create_credentials.rst +86 -0
- aimodelshare/documentation/source/example_notebooks.rst +132 -0
- aimodelshare/documentation/source/functions.rst +151 -0
- aimodelshare/documentation/source/gettingstarted.rst +390 -0
- aimodelshare/documentation/source/images/creds1.png +0 -0
- aimodelshare/documentation/source/images/creds2.png +0 -0
- aimodelshare/documentation/source/images/creds3.png +0 -0
- aimodelshare/documentation/source/images/creds4.png +0 -0
- aimodelshare/documentation/source/images/creds5.png +0 -0
- aimodelshare/documentation/source/images/creds_file_example.png +0 -0
- aimodelshare/documentation/source/images/predict_tab.png +0 -0
- aimodelshare/documentation/source/index.rst +110 -0
- aimodelshare/documentation/source/modelplayground.rst +132 -0
- aimodelshare/exceptions.py +11 -0
- aimodelshare/generatemodelapi.py +1270 -0
- aimodelshare/iam/codebuild_policy.txt +129 -0
- aimodelshare/iam/codebuild_trust_relationship.txt +12 -0
- aimodelshare/iam/lambda_policy.txt +15 -0
- aimodelshare/iam/lambda_trust_relationship.txt +12 -0
- aimodelshare/json_templates/__init__.py +1 -0
- aimodelshare/json_templates/api_json.txt +155 -0
- aimodelshare/json_templates/auth/policy.txt +1 -0
- aimodelshare/json_templates/auth/role.txt +1 -0
- aimodelshare/json_templates/eval/policy.txt +1 -0
- aimodelshare/json_templates/eval/role.txt +1 -0
- aimodelshare/json_templates/function/policy.txt +1 -0
- aimodelshare/json_templates/function/role.txt +1 -0
- aimodelshare/json_templates/integration_response.txt +5 -0
- aimodelshare/json_templates/lambda_policy_1.txt +15 -0
- aimodelshare/json_templates/lambda_policy_2.txt +8 -0
- aimodelshare/json_templates/lambda_role_1.txt +12 -0
- aimodelshare/json_templates/lambda_role_2.txt +16 -0
- aimodelshare/leaderboard.py +174 -0
- aimodelshare/main/1.txt +132 -0
- aimodelshare/main/1B.txt +112 -0
- aimodelshare/main/2.txt +153 -0
- aimodelshare/main/3.txt +134 -0
- aimodelshare/main/4.txt +128 -0
- aimodelshare/main/5.txt +109 -0
- aimodelshare/main/6.txt +105 -0
- aimodelshare/main/7.txt +144 -0
- aimodelshare/main/8.txt +142 -0
- aimodelshare/main/__init__.py +1 -0
- aimodelshare/main/authorization.txt +275 -0
- aimodelshare/main/eval_classification.txt +79 -0
- aimodelshare/main/eval_lambda.txt +1709 -0
- aimodelshare/main/eval_regression.txt +80 -0
- aimodelshare/main/lambda_function.txt +8 -0
- aimodelshare/main/nst.txt +149 -0
- aimodelshare/model.py +1543 -0
- aimodelshare/modeluser.py +215 -0
- aimodelshare/moral_compass/README.md +408 -0
- aimodelshare/moral_compass/__init__.py +65 -0
- aimodelshare/moral_compass/_version.py +3 -0
- aimodelshare/moral_compass/api_client.py +601 -0
- aimodelshare/moral_compass/apps/__init__.py +69 -0
- aimodelshare/moral_compass/apps/ai_consequences.py +540 -0
- aimodelshare/moral_compass/apps/bias_detective.py +714 -0
- aimodelshare/moral_compass/apps/ethical_revelation.py +898 -0
- aimodelshare/moral_compass/apps/fairness_fixer.py +889 -0
- aimodelshare/moral_compass/apps/judge.py +888 -0
- aimodelshare/moral_compass/apps/justice_equity_upgrade.py +853 -0
- aimodelshare/moral_compass/apps/mc_integration_helpers.py +820 -0
- aimodelshare/moral_compass/apps/model_building_game.py +1104 -0
- aimodelshare/moral_compass/apps/model_building_game_beginner.py +687 -0
- aimodelshare/moral_compass/apps/moral_compass_challenge.py +858 -0
- aimodelshare/moral_compass/apps/session_auth.py +254 -0
- aimodelshare/moral_compass/apps/shared_activity_styles.css +349 -0
- aimodelshare/moral_compass/apps/tutorial.py +481 -0
- aimodelshare/moral_compass/apps/what_is_ai.py +853 -0
- aimodelshare/moral_compass/challenge.py +365 -0
- aimodelshare/moral_compass/config.py +187 -0
- aimodelshare/placeholders/model.onnx +0 -0
- aimodelshare/placeholders/preprocessor.zip +0 -0
- aimodelshare/playground.py +1968 -0
- aimodelshare/postprocessormodules.py +157 -0
- aimodelshare/preprocessormodules.py +373 -0
- aimodelshare/pyspark/1.txt +195 -0
- aimodelshare/pyspark/1B.txt +181 -0
- aimodelshare/pyspark/2.txt +220 -0
- aimodelshare/pyspark/3.txt +204 -0
- aimodelshare/pyspark/4.txt +187 -0
- aimodelshare/pyspark/5.txt +178 -0
- aimodelshare/pyspark/6.txt +174 -0
- aimodelshare/pyspark/7.txt +211 -0
- aimodelshare/pyspark/8.txt +206 -0
- aimodelshare/pyspark/__init__.py +1 -0
- aimodelshare/pyspark/authorization.txt +258 -0
- aimodelshare/pyspark/eval_classification.txt +79 -0
- aimodelshare/pyspark/eval_lambda.txt +1441 -0
- aimodelshare/pyspark/eval_regression.txt +80 -0
- aimodelshare/pyspark/lambda_function.txt +8 -0
- aimodelshare/pyspark/nst.txt +213 -0
- aimodelshare/python/my_preprocessor.py +58 -0
- aimodelshare/readme.md +26 -0
- aimodelshare/reproducibility.py +181 -0
- aimodelshare/sam/Dockerfile.txt +8 -0
- aimodelshare/sam/Dockerfile_PySpark.txt +24 -0
- aimodelshare/sam/__init__.py +1 -0
- aimodelshare/sam/buildspec.txt +11 -0
- aimodelshare/sam/codebuild_policies.txt +129 -0
- aimodelshare/sam/codebuild_trust_relationship.txt +12 -0
- aimodelshare/sam/codepipeline_policies.txt +173 -0
- aimodelshare/sam/codepipeline_trust_relationship.txt +12 -0
- aimodelshare/sam/spark-class.txt +2 -0
- aimodelshare/sam/template.txt +54 -0
- aimodelshare/tools.py +103 -0
- aimodelshare/utils/__init__.py +78 -0
- aimodelshare/utils/optional_deps.py +38 -0
- aimodelshare/utils.py +57 -0
- aimodelshare-0.3.7.dist-info/METADATA +298 -0
- aimodelshare-0.3.7.dist-info/RECORD +171 -0
- aimodelshare-0.3.7.dist-info/WHEEL +5 -0
- aimodelshare-0.3.7.dist-info/licenses/LICENSE +5 -0
- aimodelshare-0.3.7.dist-info/top_level.txt +1 -0
@@ -0,0 +1,80 @@
+import boto3
+import pandas as pd
+import os
+import numpy as np
+import onnxruntime as rt
+import json
+import sklearn
+from sklearn.metrics import accuracy_score
+from sklearn.metrics import f1_score
+from sklearn.metrics import precision_score
+from sklearn.metrics import recall_score
+from sklearn.metrics import roc_auc_score
+from sklearn.metrics import mean_squared_error
+from sklearn.metrics import r2_score
+from sklearn.metrics import mean_absolute_error
+from math import sqrt
+import pickle
+import six
+
+def get_ytestdata(ytest_s3_filename="ytest.pkl"):
+    s3 = boto3.resource("s3")
+    bucket = s3.Bucket("$bucket_name")
+
+    with open("/tmp/ytest.pkl", "wb") as ytestfo:
+        bucket.download_fileobj("$unique_model_id/ytest.pkl", ytestfo)
+    ytestdata = pickle.load(open("/tmp/ytest.pkl", "rb"))
+    return ytestdata
+
+def model_eval_metrics(y_true, y_pred, classification="TRUE"):
+    if classification == "TRUE":
+        accuracy_eval = accuracy_score(y_true, y_pred)
+        f1_score_eval = f1_score(y_true, y_pred, average="macro", zero_division=0)
+        precision_eval = precision_score(y_true, y_pred, average="macro", zero_division=0)
+        recall_eval = recall_score(y_true, y_pred, average="macro", zero_division=0)
+        mse_eval = 0
+        rmse_eval = 0
+        mae_eval = 0
+        r2_eval = 0
+        metricdata = {'accuracy': [accuracy_eval], 'f1_score': [f1_score_eval], 'precision': [precision_eval], 'recall': [recall_eval], 'mse': [mse_eval], 'rmse': [rmse_eval], 'mae': [mae_eval], 'r2': [r2_eval]}
+        finalmetricdata = pd.DataFrame.from_dict(metricdata)
+    else:
+        accuracy_eval = 0
+        f1_score_eval = 0
+        precision_eval = 0
+        recall_eval = 0
+        mse_eval = mean_squared_error(y_true, y_pred)
+        rmse_eval = sqrt(mean_squared_error(y_true, y_pred))
+        mae_eval = mean_absolute_error(y_true, y_pred)
+        r2_eval = r2_score(y_true, y_pred)
+        metricdata = {'accuracy': [accuracy_eval], 'f1_score': [f1_score_eval], 'precision': [precision_eval], 'recall': [recall_eval], 'mse': [mse_eval], 'rmse': [rmse_eval], 'mae': [mae_eval], 'r2': [r2_eval]}
+        finalmetricdata = pd.DataFrame.from_dict(metricdata)
+    return finalmetricdata.to_dict('records')[0]
+
+ytestdata = get_ytestdata(ytest_s3_filename="ytest.pkl")
+
+def evaluate_model(event, ytestdata):
+    body = event["body"]
+    print(body)
+    import six
+    if isinstance(event["body"], six.string_types):
+        prediction_list = json.loads(event["body"])
+        print(prediction_list)
+    else:
+        prediction_list = event["body"]
+        print(prediction_list)
+
+    result = model_eval_metrics(ytestdata, prediction_list, classification="FALSE")
+    return result
+
+def handler(event, context):
+    result = evaluate_model(event, ytestdata)
+    return {"statusCode": 200,
+            "headers": {
+                "Access-Control-Allow-Origin": "*",
+                "Access-Control-Allow-Credentials": True,
+                "Allow": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Headers": "*"
+            },
+            "body": json.dumps(result)}
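The hunk above is an evaluation Lambda template: at cold start the handler loads `ytest.pkl` from S3, then parses a JSON list of predictions from the request body and returns regression metrics (mse, rmse, mae, r2) as JSON. As a rough illustration of the request/response shape only, here is a minimal client-side sketch; the endpoint URL and `authorizationToken` header are placeholder assumptions, not values taken from the package.

```python
import json
import requests  # assumes the requests library is installed client-side

# Hypothetical values -- substitute the API URL and token from your own deployment.
EVAL_URL = "https://<api-id>.execute-api.<region>.amazonaws.com/prod/eval"
HEADERS = {"Content-Type": "application/json", "authorizationToken": "<token>"}

# The handler json.loads() the raw body, so the payload is simply a JSON list
# of predictions, compared in order against the ytest data stored in S3.
predictions = [3.2, 1.7, 4.9, 2.4]

response = requests.post(EVAL_URL, headers=HEADERS, data=json.dumps(predictions))
print(response.json())  # {"accuracy": 0, ..., "mse": ..., "rmse": ..., "mae": ..., "r2": ...}
```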
@@ -0,0 +1,149 @@
+#Neural Style Transfer Prediction Runtime Code
+
+import boto3
+import cv2
+import os
+import numpy as np
+import json
+import onnxruntime as rt
+import base64
+import imghdr
+import six
+from functools import partial
+import os.path
+from os import path
+from io import BytesIO
+from PIL import Image
+
+def get_model_onnx(runtimemodel_s3_filename="runtime_model.onnx"):
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" +
+                    "/runtime_model.onnx")
+    model = rt.InferenceSession(obj.get()['Body'].read())
+    return model
+
+def get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip"):
+    import pickle
+    from zipfile import ZipFile
+    from io import BytesIO
+    import os
+    s3 = boto3.resource("s3")
+    bucket = s3.Bucket("$bucket_name")
+
+    zip_obj = s3.Object(bucket_name="$bucket_name", key="$unique_model_id/runtime_preprocessor.zip")
+    buffer = BytesIO(zip_obj.get()["Body"].read())
+    z = ZipFile(buffer)
+    # Extract all the contents of the zip file into /tmp/
+    z.extractall("/tmp/")
+
+    folderpath = os.path.dirname(os.path.abspath("/tmp/preprocessor.py"))
+    file_name = os.path.basename("/tmp/preprocessor.py")
+
+    # Then import all pkl files you want from bucket (need to generate this list from...
+    # function globals)
+    import os
+    pickle_file_list = []
+    for file in os.listdir(folderpath):
+        if file.endswith(".pkl"):
+            pickle_file_list.append(os.path.join(folderpath, file))
+
+    for i in pickle_file_list:
+        objectname = str(os.path.basename(i)).replace(".pkl", "")
+        objects = {objectname: ""}
+        globals()[objectname] = pickle.load(open(str(i), "rb"))
+    # First import preprocessor function to session from preprocessor.py
+    exec(open(os.path.join(folderpath, 'preprocessor.py')).read(), globals())
+    return preprocessor
+
+
+def get_runtimedata(runtimedata_s3_filename="runtime_data.json"):
+
+    s3 = boto3.resource('s3')
+    obj = s3.Object("$bucket_name", "$unique_model_id" + "/" + runtimedata_s3_filename)
+    runtime_data = json.load(obj.get()['Body'])
+
+    return runtime_data
+
+
+
+runtime_data = get_runtimedata(runtimedata_s3_filename="runtime_data.json")
+
+preprocessor_type = runtime_data["runtime_preprocessor"]
+
+runtime_model = runtime_data["runtime_model"]["name"]
+
+model = get_model_onnx(runtimemodel_s3_filename='runtime_model.onnx')
+
+# Load preprocessor
+
+preprocessor = get_preprocessor(preprocessor_s3_filename="runtime_preprocessor.zip")
+
+
+
+def predict(event, model, preprocessor):
+
+    # Load base64 encoded images stored within the event body
+    body = event["body"]
+
+    if isinstance(event["body"], six.string_types):
+        body = json.loads(event["body"])
+
+    content = body["content"]
+    style = body["style"]
+
+    # Extract image file extension (e.g. jpg, png, etc.)
+
+    sample_content = base64.decodebytes(bytearray(content, "utf-8"))
+    sample_style = base64.decodebytes(bytearray(style, "utf-8"))
+
+    content_file_type = None
+    for tf in imghdr.tests:
+        image_file_type = tf(sample_content, None)
+        if image_file_type:
+            break
+    content_file_type = image_file_type
+    if content_file_type is None:
+        print("This file is not an image, please submit a base64 encoded image file.")
+
+    style_file_type = None
+    for tf in imghdr.tests:
+        image_file_type = tf(sample_style, None)
+        if image_file_type:
+            break
+    style_file_type = image_file_type
+    if style_file_type is None:
+        print("This file is not an image, please submit a base64 encoded image file.")
+
+    # Save image to local file, read into session, and preprocess image with preprocessor function
+    with open("/tmp/imagetopredict." + content_file_type, "wb") as fh:
+        fh.write(base64.b64decode(content))
+    content_image = preprocessor("/tmp/imagetopredict." + content_file_type)
+    with open("/tmp/imagetopredict." + style_file_type, "wb") as fh:
+        fh.write(base64.b64decode(style))
+    style_image = preprocessor("/tmp/imagetopredict." + style_file_type)
+
+    # Generate prediction using preprocessed input data
+    print("The model expects input shape:", model.get_inputs()[0].shape)
+    input_name = model.get_inputs()[0].name
+    res = model.run(None, {
+        "placeholder": content_image,
+        "placeholder_1": style_image
+    })
+
+    pil_img = Image.fromarray(np.uint8(res[0].squeeze(axis=0) * 255))
+    buffered = BytesIO()
+    pil_img.save(buffered, format="JPEG")
+    img_str = base64.b64encode(buffered.getvalue())
+    return img_str.decode("utf-8")
+
+def handler(event, context):
+    result = predict(event, model, preprocessor)
+    return {"statusCode": 200,
+            "headers": {
+                "Access-Control-Allow-Origin": "*",
+                "Access-Control-Allow-Credentials": True,
+                "Allow": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Methods": "GET, OPTIONS, POST",
+                "Access-Control-Allow-Headers": "*"
+            },
+            "body": json.dumps(result)}
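This second hunk is the neural style transfer runtime: the handler expects a JSON body with base64-encoded `content` and `style` images and returns the stylized result as a base64-encoded JPEG string. A minimal client-side sketch of that round trip follows; the endpoint URL, auth header, and file names are assumptions for illustration, not values from the package.

```python
import base64
import json
import requests  # assumes the requests library is installed client-side

# Hypothetical values -- substitute the API URL and token from your own deployment.
NST_URL = "https://<api-id>.execute-api.<region>.amazonaws.com/prod/m"
HEADERS = {"Content-Type": "application/json", "authorizationToken": "<token>"}

# The handler reads base64-encoded "content" and "style" images from the JSON body.
with open("content.jpg", "rb") as f:
    content_b64 = base64.b64encode(f.read()).decode("utf-8")
with open("style.jpg", "rb") as f:
    style_b64 = base64.b64encode(f.read()).decode("utf-8")

response = requests.post(NST_URL, headers=HEADERS,
                         data=json.dumps({"content": content_b64, "style": style_b64}))

# The response body is a base64-encoded JPEG of the stylized image.
with open("stylized.jpg", "wb") as f:
    f.write(base64.b64decode(response.json()))
```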