aimodelshare 0.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (171) hide show
  1. aimodelshare/README.md +26 -0
  2. aimodelshare/__init__.py +100 -0
  3. aimodelshare/aimsonnx.py +2381 -0
  4. aimodelshare/api.py +836 -0
  5. aimodelshare/auth.py +163 -0
  6. aimodelshare/aws.py +511 -0
  7. aimodelshare/aws_client.py +173 -0
  8. aimodelshare/base_image.py +154 -0
  9. aimodelshare/bucketpolicy.py +106 -0
  10. aimodelshare/color_mappings/color_mapping_keras.csv +121 -0
  11. aimodelshare/color_mappings/color_mapping_pytorch.csv +117 -0
  12. aimodelshare/containerisation.py +244 -0
  13. aimodelshare/containerization.py +712 -0
  14. aimodelshare/containerization_templates/Dockerfile.txt +8 -0
  15. aimodelshare/containerization_templates/Dockerfile_PySpark.txt +23 -0
  16. aimodelshare/containerization_templates/buildspec.txt +14 -0
  17. aimodelshare/containerization_templates/lambda_function.txt +40 -0
  18. aimodelshare/custom_approach/__init__.py +1 -0
  19. aimodelshare/custom_approach/lambda_function.py +17 -0
  20. aimodelshare/custom_eval_metrics.py +103 -0
  21. aimodelshare/data_sharing/__init__.py +0 -0
  22. aimodelshare/data_sharing/data_sharing_templates/Dockerfile.txt +3 -0
  23. aimodelshare/data_sharing/data_sharing_templates/__init__.py +1 -0
  24. aimodelshare/data_sharing/data_sharing_templates/buildspec.txt +15 -0
  25. aimodelshare/data_sharing/data_sharing_templates/codebuild_policies.txt +129 -0
  26. aimodelshare/data_sharing/data_sharing_templates/codebuild_trust_relationship.txt +12 -0
  27. aimodelshare/data_sharing/download_data.py +620 -0
  28. aimodelshare/data_sharing/share_data.py +373 -0
  29. aimodelshare/data_sharing/utils.py +8 -0
  30. aimodelshare/deploy_custom_lambda.py +246 -0
  31. aimodelshare/documentation/Makefile +20 -0
  32. aimodelshare/documentation/karma_sphinx_theme/__init__.py +28 -0
  33. aimodelshare/documentation/karma_sphinx_theme/_version.py +2 -0
  34. aimodelshare/documentation/karma_sphinx_theme/breadcrumbs.html +70 -0
  35. aimodelshare/documentation/karma_sphinx_theme/layout.html +172 -0
  36. aimodelshare/documentation/karma_sphinx_theme/search.html +50 -0
  37. aimodelshare/documentation/karma_sphinx_theme/searchbox.html +14 -0
  38. aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css +2 -0
  39. aimodelshare/documentation/karma_sphinx_theme/static/css/custom.css.map +1 -0
  40. aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css +2751 -0
  41. aimodelshare/documentation/karma_sphinx_theme/static/css/theme.css.map +1 -0
  42. aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css +2 -0
  43. aimodelshare/documentation/karma_sphinx_theme/static/css/theme.min.css.map +1 -0
  44. aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.eot +0 -0
  45. aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.svg +32 -0
  46. aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.ttf +0 -0
  47. aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff +0 -0
  48. aimodelshare/documentation/karma_sphinx_theme/static/font/fontello.woff2 +0 -0
  49. aimodelshare/documentation/karma_sphinx_theme/static/js/theme.js +68 -0
  50. aimodelshare/documentation/karma_sphinx_theme/theme.conf +9 -0
  51. aimodelshare/documentation/make.bat +35 -0
  52. aimodelshare/documentation/requirements.txt +2 -0
  53. aimodelshare/documentation/source/about.rst +18 -0
  54. aimodelshare/documentation/source/advanced_features.rst +137 -0
  55. aimodelshare/documentation/source/competition.rst +218 -0
  56. aimodelshare/documentation/source/conf.py +58 -0
  57. aimodelshare/documentation/source/create_credentials.rst +86 -0
  58. aimodelshare/documentation/source/example_notebooks.rst +132 -0
  59. aimodelshare/documentation/source/functions.rst +151 -0
  60. aimodelshare/documentation/source/gettingstarted.rst +390 -0
  61. aimodelshare/documentation/source/images/creds1.png +0 -0
  62. aimodelshare/documentation/source/images/creds2.png +0 -0
  63. aimodelshare/documentation/source/images/creds3.png +0 -0
  64. aimodelshare/documentation/source/images/creds4.png +0 -0
  65. aimodelshare/documentation/source/images/creds5.png +0 -0
  66. aimodelshare/documentation/source/images/creds_file_example.png +0 -0
  67. aimodelshare/documentation/source/images/predict_tab.png +0 -0
  68. aimodelshare/documentation/source/index.rst +110 -0
  69. aimodelshare/documentation/source/modelplayground.rst +132 -0
  70. aimodelshare/exceptions.py +11 -0
  71. aimodelshare/generatemodelapi.py +1270 -0
  72. aimodelshare/iam/codebuild_policy.txt +129 -0
  73. aimodelshare/iam/codebuild_trust_relationship.txt +12 -0
  74. aimodelshare/iam/lambda_policy.txt +15 -0
  75. aimodelshare/iam/lambda_trust_relationship.txt +12 -0
  76. aimodelshare/json_templates/__init__.py +1 -0
  77. aimodelshare/json_templates/api_json.txt +155 -0
  78. aimodelshare/json_templates/auth/policy.txt +1 -0
  79. aimodelshare/json_templates/auth/role.txt +1 -0
  80. aimodelshare/json_templates/eval/policy.txt +1 -0
  81. aimodelshare/json_templates/eval/role.txt +1 -0
  82. aimodelshare/json_templates/function/policy.txt +1 -0
  83. aimodelshare/json_templates/function/role.txt +1 -0
  84. aimodelshare/json_templates/integration_response.txt +5 -0
  85. aimodelshare/json_templates/lambda_policy_1.txt +15 -0
  86. aimodelshare/json_templates/lambda_policy_2.txt +8 -0
  87. aimodelshare/json_templates/lambda_role_1.txt +12 -0
  88. aimodelshare/json_templates/lambda_role_2.txt +16 -0
  89. aimodelshare/leaderboard.py +174 -0
  90. aimodelshare/main/1.txt +132 -0
  91. aimodelshare/main/1B.txt +112 -0
  92. aimodelshare/main/2.txt +153 -0
  93. aimodelshare/main/3.txt +134 -0
  94. aimodelshare/main/4.txt +128 -0
  95. aimodelshare/main/5.txt +109 -0
  96. aimodelshare/main/6.txt +105 -0
  97. aimodelshare/main/7.txt +144 -0
  98. aimodelshare/main/8.txt +142 -0
  99. aimodelshare/main/__init__.py +1 -0
  100. aimodelshare/main/authorization.txt +275 -0
  101. aimodelshare/main/eval_classification.txt +79 -0
  102. aimodelshare/main/eval_lambda.txt +1709 -0
  103. aimodelshare/main/eval_regression.txt +80 -0
  104. aimodelshare/main/lambda_function.txt +8 -0
  105. aimodelshare/main/nst.txt +149 -0
  106. aimodelshare/model.py +1543 -0
  107. aimodelshare/modeluser.py +215 -0
  108. aimodelshare/moral_compass/README.md +408 -0
  109. aimodelshare/moral_compass/__init__.py +65 -0
  110. aimodelshare/moral_compass/_version.py +3 -0
  111. aimodelshare/moral_compass/api_client.py +601 -0
  112. aimodelshare/moral_compass/apps/__init__.py +69 -0
  113. aimodelshare/moral_compass/apps/ai_consequences.py +540 -0
  114. aimodelshare/moral_compass/apps/bias_detective.py +714 -0
  115. aimodelshare/moral_compass/apps/ethical_revelation.py +898 -0
  116. aimodelshare/moral_compass/apps/fairness_fixer.py +889 -0
  117. aimodelshare/moral_compass/apps/judge.py +888 -0
  118. aimodelshare/moral_compass/apps/justice_equity_upgrade.py +853 -0
  119. aimodelshare/moral_compass/apps/mc_integration_helpers.py +820 -0
  120. aimodelshare/moral_compass/apps/model_building_game.py +1104 -0
  121. aimodelshare/moral_compass/apps/model_building_game_beginner.py +687 -0
  122. aimodelshare/moral_compass/apps/moral_compass_challenge.py +858 -0
  123. aimodelshare/moral_compass/apps/session_auth.py +254 -0
  124. aimodelshare/moral_compass/apps/shared_activity_styles.css +349 -0
  125. aimodelshare/moral_compass/apps/tutorial.py +481 -0
  126. aimodelshare/moral_compass/apps/what_is_ai.py +853 -0
  127. aimodelshare/moral_compass/challenge.py +365 -0
  128. aimodelshare/moral_compass/config.py +187 -0
  129. aimodelshare/placeholders/model.onnx +0 -0
  130. aimodelshare/placeholders/preprocessor.zip +0 -0
  131. aimodelshare/playground.py +1968 -0
  132. aimodelshare/postprocessormodules.py +157 -0
  133. aimodelshare/preprocessormodules.py +373 -0
  134. aimodelshare/pyspark/1.txt +195 -0
  135. aimodelshare/pyspark/1B.txt +181 -0
  136. aimodelshare/pyspark/2.txt +220 -0
  137. aimodelshare/pyspark/3.txt +204 -0
  138. aimodelshare/pyspark/4.txt +187 -0
  139. aimodelshare/pyspark/5.txt +178 -0
  140. aimodelshare/pyspark/6.txt +174 -0
  141. aimodelshare/pyspark/7.txt +211 -0
  142. aimodelshare/pyspark/8.txt +206 -0
  143. aimodelshare/pyspark/__init__.py +1 -0
  144. aimodelshare/pyspark/authorization.txt +258 -0
  145. aimodelshare/pyspark/eval_classification.txt +79 -0
  146. aimodelshare/pyspark/eval_lambda.txt +1441 -0
  147. aimodelshare/pyspark/eval_regression.txt +80 -0
  148. aimodelshare/pyspark/lambda_function.txt +8 -0
  149. aimodelshare/pyspark/nst.txt +213 -0
  150. aimodelshare/python/my_preprocessor.py +58 -0
  151. aimodelshare/readme.md +26 -0
  152. aimodelshare/reproducibility.py +181 -0
  153. aimodelshare/sam/Dockerfile.txt +8 -0
  154. aimodelshare/sam/Dockerfile_PySpark.txt +24 -0
  155. aimodelshare/sam/__init__.py +1 -0
  156. aimodelshare/sam/buildspec.txt +11 -0
  157. aimodelshare/sam/codebuild_policies.txt +129 -0
  158. aimodelshare/sam/codebuild_trust_relationship.txt +12 -0
  159. aimodelshare/sam/codepipeline_policies.txt +173 -0
  160. aimodelshare/sam/codepipeline_trust_relationship.txt +12 -0
  161. aimodelshare/sam/spark-class.txt +2 -0
  162. aimodelshare/sam/template.txt +54 -0
  163. aimodelshare/tools.py +103 -0
  164. aimodelshare/utils/__init__.py +78 -0
  165. aimodelshare/utils/optional_deps.py +38 -0
  166. aimodelshare/utils.py +57 -0
  167. aimodelshare-0.3.7.dist-info/METADATA +298 -0
  168. aimodelshare-0.3.7.dist-info/RECORD +171 -0
  169. aimodelshare-0.3.7.dist-info/WHEEL +5 -0
  170. aimodelshare-0.3.7.dist-info/licenses/LICENSE +5 -0
  171. aimodelshare-0.3.7.dist-info/top_level.txt +1 -0
@@ -0,0 +1,151 @@
1
+ Supporting Functions
2
+ ====================
3
+
4
+ .. _configure_credentials:
5
+
6
+ configure_credentials()
7
+ -----------------------
8
+
9
+ .. py:function:: aimodelshare.aws.configure_credentials()
10
+
11
+ Return a formatted credentials file built with user inputs.
12
+
13
+ Combine your AI Model Share & AWS credentials into a single 'credentials.txt' file with the `configure_credentials` function. You only have to make the file once, then you can use it whenever you use the aimodelshare library.
14
+
15
+ Credentials files must follow this format:
16
+
17
+ .. image:: images/creds_file_example.png
18
+ :width: 600
19
+
20
+ The following code will prompt you to provide your credentials one at a time and pre-format a txt file for you to use in the future:
21
+
22
+
23
+ Example ::
24
+
25
+ #install aimodelshare library
26
+ ! pip install aimodelshare
27
+
28
+ # Generate credentials file
29
+ import aimodelshare as ai
30
+ from aimodelshare.aws import configure_credentials
31
+ configure_credentials()
32
+
33
+
34
+ .. _set_credentials:
35
+
36
+ set_credentials()
37
+ -----------------
38
+
39
+ Set credentials for all AI Model Share functions with
40
+ the ``aimodelshare.aws.set_credentials()`` function:
41
+
42
+ .. py:function:: aimodelshare.aws.set_credentials(credential_file="credentials.txt", type="submit_model", apiurl=None)
43
+
44
+ Set credentials for AI Model Share and Amazon Web Services (AWS).
45
+
46
+ :param credential_file: Path to formatted credentials txt file.
47
+ :type credential_file: string
48
+
49
+ :param type: set to "deploy_model" to deploy a ModelPlayground.
50
+ :type type: string
51
+
52
+ :param apiurl: unique api_url that powers a specific Model Playground.
53
+ :type apiurl: string
54
+
55
+ :return: Success Message.
56
+
57
+ Example ::
58
+
59
+ # Deploying ModelPlaygrounds - Requires AWS credentials
60
+ from aimodelshare.aws import set_credentials
61
+ set_credentials(credential_file="credentials.txt", type="deploy_model")
62
+
63
+ # Submitting Models to Competition - No AWS credentials required
64
+ from aimodelshare.aws import set_credentials
65
+ apiurl="https://example.execute-api.us-east-1.amazonaws.com/prod/m"
66
+ set_credentials(apiurl=apiurl)
67
+
68
+ .. _download_data:
69
+
70
+ download_data()
71
+ ---------------
72
+
73
+ Download data sets that have been shared to AI ModelShare with the ``aimodelshare.data_sharing.download_data()`` function:
74
+
75
+ .. py:function:: aimodelshare.data_sharing.download_data(repository)
76
+
77
+ Download data that has been shared to the AI ModelShare website.
78
+
79
+ :param repository: URI & image_tag of uploaded data (provided with the create_competition method of the Model Playground class)
80
+ :type repository: string
81
+ :return: Success Message & downloaded data directory
82
+
83
+ Example ::
84
+
85
+ from aimodelshare import download_data
86
+ download_data('example-repository:image_tag')
87
+
88
+ .. _export_eval_metric:
89
+
90
+ export_eval_metric()
91
+ --------------------
92
+
93
+ .. py:function:: aimodelshare.custom_eval_metrics.export_eval_metric(eval_metric_fxn, directory, name)
94
+
95
+ Export evaluation metric and related objects into zip file for model deployment
96
+
97
+ :param eval_metric_fxn: name of eval metric function (should always be named "eval_metric" to work properly)
98
+ :type eval_metric_fxn: string
99
+ :param directory: folderpath to eval metric function
100
+ use "" to reference current working directory
101
+ :type directory: string
102
+ :param name: name of the custom eval metric
103
+ :type name: string
104
+ :return: file named 'name.zip' in the correct format for model deployment
105
+
106
+ Example ::
107
+
108
+ from aimodelshare import export_eval_metric
109
+ export_eval_metric(eval_metric_fxn, directory, name)
110
+
111
+ .. _export_reproducibility_env:
112
+
113
+ export_reproducibility_env()
114
+ ----------------------------
115
+
116
+ .. py:function:: aimodelshare.reproducibility.export_reproducibility_env(seed, directory, mode)
117
+
118
+ Export development environment to enable reproducibility of your model.
119
+
120
+ :param seed: Random Seed
121
+ :type seed: Int
122
+ :param directory: Directory for completed json file
123
+ :type directory: string
124
+ :param mode: Processor - either "gpu" or "cpu"
125
+ :type mode: string
126
+ :return: “./reproducibility.json” file to use with submit_model()
127
+
128
+ Example ::
129
+
130
+ from aimodelshare import export_reproducibility_env
131
+ export_reproducibility_env(seed, directory, mode)
132
+
133
+ .. _share_dataset:
134
+
135
+ share_dataset()
136
+ ---------------
137
+
138
+ Upload data sets to AI ModelShare with the ``aimodelshare.data_sharing.share_dataset()`` function:
139
+
140
+ .. py:function:: aimodelshare.data_sharing.share_dataset(data_directory="folder_file_path",classification="default", private="FALSE")
141
+
142
+ Upload data to the AI ModelShare website.
143
+
144
+ :param data_directory: path to the file directory to upload.
145
+ :type data_directory: string
146
+ :return: Success Message
147
+
148
+ Example ::
149
+
150
+ from aimodelshare.data_sharing.share_data import share_dataset
151
+ share_dataset(data_directory = "example_path", classification="default", private="FALSE")
@@ -0,0 +1,390 @@
1
+ .. _aimodelshare_tutorial:
2
+
3
+ AI Model Share Tutorial
4
+ #######################
5
+
6
+ This tutorial will take you through the core functionality of the AI Model Share library, with the Titanic passenger data set. You will import the Titanic data set, build a machine learning model for tabular classification, deploy an interactive web dashboard (ModelPlayground) powered by a REST API, create a competition, and explore how to learn from the model collaboration process.
7
+
8
+ This tutorial is applicable for all new users, especially those that are interested in publishing their machine learning models on a live webpage to generate predictions, improving on existing models through the model collaboration process, and/or hosting a competition for other users.
9
+
10
+ If you have been invited to participate in an existing competition (or would like to submit a model to any of our ongoing public competitions), you may wish to skip to one of the Model Submission Guides included in the :ref:`example_notebooks`.
11
+
12
+ The only thing you need to complete the tutorial is a computer with an internet connection.
13
+
14
+
15
+ .. _getting_started:
16
+
17
+ Getting Started
18
+ ***************
19
+
20
+ .. _cred_configure:
21
+
22
+ Credential Configuration
23
+ ========================
24
+
25
+ To complete this tutorial, you will need to have a pre-formatted credentials file. Follow the directions :ref:`HERE <create_credentials>` to create one.
26
+
27
+ .. _set_environment:
28
+
29
+ Set Up Environment
30
+ ==================
31
+
32
+ Use your credentials file to set your credentials for all aimodelshare functions. This will give you access to your AI Model Share account and your AWS resources in order to deploy a Model Playground.
33
+
34
+ .. code-block::
35
+
36
+ # Set credentials
37
+ from aimodelshare.aws import set_credentials
38
+ set_credentials(credential_file="credentials.txt", type="deploy_model")
39
+
40
+ # Get materials for tutorial
41
+ import aimodelshare as ai
42
+ X_train, X_test, y_train_labels, y_test, example_data, y_test_labels = ai.import_quickstart_data("titanic")
43
+
44
+
45
+ .. _part_one:
46
+
47
+ Part One: Deploy a Model Playground
48
+ ***********************************
49
+
50
+ This tutorial will use data from the Titanic passenger data set. We will use attributes of the passengers on board to determine whether they survived or died in the 1912 Shipwreck.
51
+
52
+ At the end of part one, you will have built a model for tabular classification which will take passenger characteristics and predict if they survived or died in the Titanic shipwreck. You will have deployed that model into a "Model Playground", which is an interactive web application that will use your model to generate predictions in a user-friendly dashboard. Additionally, users will have access to customized code to use the background REST API to generate predictions programmatically.
53
+
54
+ .. _step_one:
55
+
56
+ Step 1: Preprocessor Function & Setup
57
+ =====================================
58
+
59
+ Preprocessor functions are used to preprocess data into the precise format your model requires to generate predictions. An example preprocessor using sklearn's Column Transformer is included below, but you can customize your preprocessor however you see fit.
60
+
61
+ .. note::
62
+ Preprocessor functions should always be named "preprocessor".
63
+
64
+ You can use any Python library in a preprocessor function, but all libraries should be imported inside your preprocessor function.
65
+
66
+ For tabular prediction models, users should minimally include function inputs for an unpreprocessed pandas dataframe. Any categorical features should be preprocessed to one hot encoded numeric values.
67
+
68
+ Set Up Preprocessor::
69
+
70
+ # In this case we use Sklearn's Column transformer in our preprocessor function
71
+ from sklearn.compose import ColumnTransformer
72
+ from sklearn.pipeline import Pipeline
73
+ from sklearn.impute import SimpleImputer
74
+ from sklearn.preprocessing import StandardScaler, OneHotEncoder
75
+
76
+ #Preprocess data using sklearn's Column Transformer approach
77
+
78
+ # We create the preprocessing pipelines for both numeric and categorical data.
79
+ numeric_features = ['age', 'fare']
80
+ numeric_transformer = Pipeline(steps=[
81
+ ('imputer', SimpleImputer(strategy='median')), #'imputer' names the step
82
+ ('scaler', StandardScaler())])
83
+
84
+ categorical_features = ['embarked', 'sex', 'pclass']
85
+
86
+ # Replacing missing values with Modal value and then one-hot encoding.
87
+ categorical_transformer = Pipeline(steps=[
88
+ ('imputer', SimpleImputer(strategy='most_frequent')),
89
+ ('onehot', OneHotEncoder(handle_unknown='ignore'))])
90
+
91
+ # Final preprocessor object set up with ColumnTransformer...
92
+ preprocess = ColumnTransformer(
93
+ transformers=[
94
+ ('num', numeric_transformer, numeric_features),
95
+ ('cat', categorical_transformer, categorical_features)])
96
+
97
+ # fit preprocessor to your data
98
+ preprocess = preprocess.fit(X_train)
99
+
100
+ Preprocessor Function::
101
+
102
+ # Here is where we actually write the preprocessor function:
103
+
104
+ # Write function to transform data with preprocessor
105
+ # In this case we use sklearn's Column transformer in our preprocessor function
106
+
107
+ def preprocessor(data):
108
+ preprocessed_data=preprocess.transform(data)
109
+ return preprocessed_data
110
+
111
+ Check X Data::
112
+
113
+ # check shape of X data after preprocessing it using our new function
114
+ preprocessor(X_train).shape
115
+
116
+ One Hot Encode y_data::
117
+
118
+ # Create one hot encoded data from list of y_train category labels
119
+ #...to allow `ModelPlayground.deploy()` to extract correct labels for predictions in your deployed API
120
+ import pandas as pd
121
+ y_train = pd.get_dummies(y_train_labels)
122
+
123
+ #ensure column names are correct in one hot encoded target for correct label extraction
124
+ list(y_train.columns)
125
+
126
+ .. _step_two:
127
+
128
+ Step 2 - Build Model
129
+ ====================
130
+
131
+ Build Model Using sklearn (or your preferred Machine Learning Library). This is the model that will ultimately power your REST API and Model Playground. The model and preprocessor can be updated at any time by the Model Playground owner.
132
+
133
+ .. code-block::
134
+
135
+ from sklearn.linear_model import LogisticRegression
136
+
137
+ model = LogisticRegression(C=10, penalty='l1', solver = 'liblinear')
138
+ model.fit(preprocessor(X_train), y_train_labels) # Fitting to the training set.
139
+ model.score(preprocessor(X_train), y_train_labels) # Fit score, 0-1 scale.
140
+
141
+ .. _step_three:
142
+
143
+ Step 3 - Save Preprocessor
144
+ ==========================
145
+
146
+ Save preprocessor function to "preprocessor.zip" file. The preprocessor code will be included in the Model Playground and executed to preprocess data submitted for predictions.
147
+
148
+ .. code-block::
149
+
150
+ import aimodelshare as ai
151
+ ai.export_preprocessor(preprocessor,"")
152
+
153
+ .. code-block::
154
+
155
+ # Now let's import and test the preprocessor function to see if it is working...
156
+
157
+ import aimodelshare as ai
158
+ prep=ai.import_preprocessor("preprocessor.zip")
159
+ prep(example_data).shape
160
+
161
+ .. _step_four:
162
+
163
+ Step 4 - Save sklearn Model to Onnx File Format
164
+ ===============================================
165
+
166
+ .. code-block::
167
+
168
+ # Save sklearn model to local ONNX file
169
+ from aimodelshare.aimsonnx import model_to_onnx
170
+
171
+ # Check how many preprocessed input features there are
172
+ from skl2onnx.common.data_types import FloatTensorType
173
+ initial_type = [('float_input', FloatTensorType([None, 10]))] # Insert correct number of features in preprocessed data
174
+
175
+ onnx_model = model_to_onnx(model, framework='sklearn',
176
+ initial_types=initial_type,
177
+ transfer_learning=False,deep_learning=False)
178
+
179
+ with open("model.onnx", "wb") as f:
180
+ f.write(onnx_model.SerializeToString())
181
+
182
+ .. _step_five:
183
+
184
+ Step 5 - Create your Model Playground and Deploy REST API/Live Web-Application
185
+ ==============================================================================
186
+
187
+ .. code-block::
188
+
189
+ #Set up arguments for Model Playground deployment
190
+ import pandas as pd
191
+
192
+ model_filepath="model.onnx"
193
+ preprocessor_filepath="preprocessor.zip"
194
+ exampledata = example_data
195
+
196
+ .. code-block::
197
+
198
+ from aimodelshare import ModelPlayground
199
+
200
+ #Instantiate ModelPlayground() Class
201
+
202
+ myplayground=ModelPlayground(model_type="tabular", classification=True, private=False)
203
+
204
+ # Create Model Playground (generates live rest api and web-app for your model/preprocessor)
205
+
206
+ myplayground.deploy(model_filepath, preprocessor_filepath, y_train_labels, exampledata)
207
+
208
+
209
+ Use your new Model Playground!
210
+ ==============================
211
+
212
+ Follow the link in the output above to:
213
+
214
+ * Generate predictions with your interactive web dashboard.
215
+ * Access example code in Python, R, and Curl.
216
+
217
+ Or, follow the rest of the tutorial to create a competition for your Model Playground and:
218
+
219
+ * Access verified model performance metrics.
220
+ * Upload multiple models to a leaderboard.
221
+ * Easily compare model performance & structure.
222
+
223
+
224
+ .. _part_two:
225
+
226
+ Part Two: Create a Competition
227
+ ******************************
228
+
229
+ After deploying your Model Playground, you can now create a competition. Creating a competition allows you to:
230
+
231
+ * Verify the model performance metrics on aimodelshare.org.
232
+ * Submit models to a leaderboard.
233
+ * Grant access to other users to submit models to the leaderboard.
234
+ * Easily compare model performance and structure.
235
+
236
+ .. code-block::
237
+
238
+ # Create list of authorized participants for competition
239
+ # Note that participants should use the same email address when creating modelshare.org account
240
+
241
+ emaillist=["emailaddress1@email.com", "emailaddress2@email.com", "emailaddress3@email.com"]
242
+
243
+ .. code-block::
244
+
245
+ # Create Competition
246
+ # Note -- Make competition public (allow any AI Model Share user to submit models)
247
+ # .... by excluding the email_list argument and including the 'public=True' argument
248
+
249
+ myplayground.create_competition(data_directory='titanic_competition_data',
250
+ y_test = y_test_labels,
251
+ # email_list=emaillist)
252
+ public=True)
253
+
254
+ .. code-block::
255
+
256
+ #Instantiate Competition
257
+ #--Note: If you start a new session, the first argument should be the Model Playground url in quotes.
258
+ #--e.g.- mycompetition= ai.Competition("https://2121212.execute-api.us-east-1.amazonaws.com/prod/m")
259
+ #See Model Playground "Compete" tab for example model submission code.
260
+
261
+ mycompetition= ai.Competition(myplayground.playground_url)
262
+
263
+ .. code-block::
264
+
265
+ # Add, remove, or completely update authorized participants for competition later
266
+ emaillist=["emailaddress4@email.com"]
267
+ mycompetition.update_access_list(email_list=emaillist,update_type="Add")
268
+
269
+ .. _submit_models_to_comp:
270
+
271
+ Submit Models
272
+ =============
273
+
274
+ After a competition is created, users can submit models to be tracked in the competition leaderboard. When models are submitted, model metadata is extracted and model performance metrics are generated.
275
+
276
+ .. note::
277
+ There may be two leaderboards associated with every competition: a "public" leaderboard, visible to everyone with access to the competition, and a "private" leaderboard, visible to only the competition owner. Competition owners may choose to create the private leaderboard for the purpose of evaluating models with a special subset of held out y-test data. This encourages the development of models that are generalizable to additional real-world data, and not overfit to a specific split of data.
278
+
279
+
280
+ .. code-block::
281
+
282
+ #Authorized users can submit new models after setting credentials using modelshare.org username/password
283
+ from aimodelshare.aws import set_credentials
284
+
285
+ apiurl=myplayground.playground_url # example url from deployed playground: apiurl= "https://123456.execute-api.us-east-1.amazonaws.com/prod/m"
286
+
287
+ set_credentials(apiurl=apiurl)
288
+
289
+ .. code-block::
290
+
291
+ #Submit Model 1:
292
+
293
+ #-- Generate predicted values (a list of predicted labels "survived" or "died") (Model 1)
294
+ prediction_labels = model.predict(preprocessor(X_test))
295
+
296
+ # Submit Model 1 to Competition Leaderboard
297
+ mycompetition.submit_model(model_filepath = "model.onnx",
298
+ preprocessor_filepath="preprocessor.zip",
299
+ prediction_submission=prediction_labels)
300
+
301
+ Create, save, and submit a second model::
302
+
303
+ # Create model 2 (L2 Regularization - Ridge)
304
+ from sklearn.linear_model import LogisticRegression
305
+
306
+ model_2 = LogisticRegression(C=.01, penalty='l2')
307
+ model_2.fit(preprocessor(X_train), y_train_labels) # Fitting to the training set.
308
+ model_2.score(preprocessor(X_train), y_train_labels) # Fit score, 0-1 scale.
309
+
310
+ .. code-block::
311
+
312
+ # Save Model 2 to .onnx file
313
+
314
+ # How many preprocessed input features there are
315
+ from skl2onnx.common.data_types import FloatTensorType
316
+ initial_type = [('float_input', FloatTensorType([None, 10]))]
317
+
318
+ onnx_model = model_to_onnx(model_2, framework='sklearn',
319
+ initial_types=initial_type,
320
+ transfer_learning=False,
321
+ deep_learning=False)
322
+
323
+ # Save model to local .onnx file
324
+ with open("model_2.onnx", "wb") as f:
325
+ f.write(onnx_model.SerializeToString())
326
+
327
+ .. code-block::
328
+
329
+ # Submit Model 2
330
+
331
+ #-- Generate predicted y values (Model 2)
332
+ prediction_labels = model_2.predict(preprocessor(X_test))
333
+
334
+ # Submit Model 2 to Competition Leaderboard
335
+ mycompetition.submit_model(model_filepath = "model_2.onnx",
336
+ prediction_submission=prediction_labels,
337
+ preprocessor_filepath="preprocessor.zip")
338
+
339
+ .. _learn:
340
+
341
+ Learn From Submitted Models
342
+ ===========================
343
+
344
+ The leaderboard is a helpful tool for not only examining your model's current standing in an active competition, but also for learning about which model structures are most and least effective for a particular data set. Authorized competition users can download the current leaderboard for an overall understanding of model metadata and ranking, and then compare certain models to examine their metadata more closely.
345
+
346
+
347
+ Get Leaderboard::
348
+
349
+ data = mycompetition.get_leaderboard()
350
+ mycompetition.stylize_leaderboard(data)
351
+
352
+ Compare Models::
353
+
354
+ # Compare two or more models
355
+ data=mycompetition.compare_models([1,2], verbose=1)
356
+ mycompetition.stylize_compare(data)
357
+
358
+ .. note::
359
+ ``Competition.compare_models()`` is maximally useful for comparing models with the same basic structure.
360
+
361
+ Users can also check the structure of the y test data. This helps users understand how to submit predicted values to leaderboard.
362
+
363
+ Check Structure of y-test data::
364
+
365
+ mycompetition.inspect_y_test()
366
+
367
+ .. _part_three:
368
+
369
+ Part Three: Maintaining your Model Playground
370
+ *********************************************
371
+
372
+ Update Runtime model
373
+
374
+ Use this function to:
375
+
376
+ #. Update the prediction API behind your Model Playground with a new model, chosen from the leaderboard, and
377
+ #. Verify the model performance metrics in your Model Playground.
378
+
379
+ .. code-block::
380
+
381
+ myplayground.update_runtime_model(model_version=1)
382
+
383
+
384
+ Delete Deployment
385
+
386
+ Use this function to delete the entire Model Playground, including the REST API, web dashboard, competition, and all submitted models.
387
+
388
+ .. code-block::
389
+
390
+ myplayground.delete_deployment()
@@ -0,0 +1,110 @@
1
+ .. AIModelShare documentation master file, created by
2
+ sphinx-quickstart on Sun Feb 6 18:05:24 2022.
3
+ You can adapt this file completely to your liking, but it should at least
4
+ contain the root `toctree` directive.
5
+
6
+ Welcome to AIModelShare's documentation!
7
+ ========================================
8
+
9
+ Installing the Library
10
+ ----------------------
11
+
12
+ To install the AIModelShare library using PyPI:
13
+ ***********************************************
14
+
15
+ ::
16
+
17
+ # Install aimodelshare library
18
+ ! pip install aimodelshare
19
+
20
+ To install the AIModelShare library on Anaconda:
21
+ ************************************************
22
+
23
+ *Conda/Mamba Install (For Mac and Linux Users Only, Windows Users should use pip method):*
24
+
25
+ Make sure you have conda >=4.9.
26
+
27
+ You can check your conda version with: ::
28
+
29
+ conda --version
30
+
31
+ To update conda, use: ::
32
+
33
+ conda update conda
34
+
35
+ Installing aimodelshare from the ``conda-forge`` channel can be achieved by adding ``conda-forge`` to your channels with: ::
36
+
37
+ conda config --add channels conda-forge
38
+ conda config --set channel_priority strict
39
+
40
+ Once the conda-forge channel has been enabled, aimodelshare can be installed with conda: ::
41
+
42
+ conda install aimodelshare
43
+
44
+ Or with mamba: ::
45
+
46
+ mamba install aimodelshare
47
+
48
+
49
+ To fully utilize the library functionality, you will need to set up credentials with the `AI Model Share website <https://www.modelshare.org/login>`_ and/or with `Amazon Web Services. <https://aws.amazon.com/free>`_ See the credentials user guide :ref:`HERE. <create_credentials>`
50
+
51
+ AI Model Share Tutorial
52
+ -----------------------
53
+ .. toctree::
54
+
55
+ gettingstarted
56
+
57
+ Generating Credentials
58
+ ----------------------
59
+ .. toctree::
60
+
61
+ create_credentials
62
+
63
+ AI Model Share Example Notebooks
64
+ --------------------------------
65
+ .. toctree::
66
+
67
+ example_notebooks
68
+
69
+ AI Model Share Classes
70
+ ----------------------
71
+ .. toctree::
72
+
73
+ modelplayground
74
+ competition
75
+
76
+
77
+ AI Model Share Supporting Functions
78
+ -----------------------------------
79
+ .. toctree::
80
+
81
+ functions
82
+
83
+ AI Model Share Advanced Features
84
+ --------------------------------
85
+ .. toctree::
86
+
87
+ advanced_features
88
+
89
+ About Us
90
+ ---------
91
+ .. toctree::
92
+
93
+ about
94
+
95
+
96
+ .. toctree::
97
+ :maxdepth: 2
98
+ :caption: Contents:
99
+
100
+
101
+
102
+ Indices and tables
103
+ ==================
104
+
105
+ * :ref:`genindex`
106
+ * :ref:`search`
107
+
108
+ License
109
+ -------
110
+ AI | Model Share - All rights reserved. `Terms of Service <https://www.modelshare.org/terms-of-service>`_ and `Privacy Policy. <https://www.modelshare.org/privacy-policy>`_