aimodelshare 0.1.21__py3-none-any.whl → 0.1.62__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

This version of aimodelshare has been flagged as potentially problematic; review the changes below before upgrading.

Files changed (35)
  1. aimodelshare/__init__.py +94 -14
  2. aimodelshare/aimsonnx.py +417 -262
  3. aimodelshare/api.py +8 -7
  4. aimodelshare/auth.py +163 -0
  5. aimodelshare/aws.py +4 -4
  6. aimodelshare/base_image.py +1 -1
  7. aimodelshare/containerisation.py +1 -1
  8. aimodelshare/data_sharing/download_data.py +145 -88
  9. aimodelshare/generatemodelapi.py +7 -6
  10. aimodelshare/main/eval_lambda.txt +81 -13
  11. aimodelshare/model.py +493 -197
  12. aimodelshare/modeluser.py +89 -1
  13. aimodelshare/moral_compass/README.md +408 -0
  14. aimodelshare/moral_compass/__init__.py +37 -0
  15. aimodelshare/moral_compass/_version.py +3 -0
  16. aimodelshare/moral_compass/api_client.py +601 -0
  17. aimodelshare/moral_compass/apps/__init__.py +17 -0
  18. aimodelshare/moral_compass/apps/tutorial.py +198 -0
  19. aimodelshare/moral_compass/challenge.py +365 -0
  20. aimodelshare/moral_compass/config.py +187 -0
  21. aimodelshare/playground.py +26 -14
  22. aimodelshare/preprocessormodules.py +60 -6
  23. aimodelshare/reproducibility.py +20 -5
  24. aimodelshare/utils/__init__.py +78 -0
  25. aimodelshare/utils/optional_deps.py +38 -0
  26. aimodelshare-0.1.62.dist-info/METADATA +298 -0
  27. {aimodelshare-0.1.21.dist-info → aimodelshare-0.1.62.dist-info}/RECORD +30 -22
  28. {aimodelshare-0.1.21.dist-info → aimodelshare-0.1.62.dist-info}/WHEEL +1 -1
  29. aimodelshare-0.1.62.dist-info/licenses/LICENSE +5 -0
  30. {aimodelshare-0.1.21.dist-info → aimodelshare-0.1.62.dist-info}/top_level.txt +0 -1
  31. aimodelshare-0.1.21.dist-info/LICENSE +0 -22
  32. aimodelshare-0.1.21.dist-info/METADATA +0 -68
  33. tests/__init__.py +0 -0
  34. tests/test_aimsonnx.py +0 -135
  35. tests/test_playground.py +0 -721
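
If you want to confirm which of these two releases is installed before deciding whether to upgrade, the short check below may help. It is a minimal sketch that uses only the Python standard library and an ordinary pip version pin; the version strings come from this diff's title, not from any aimodelshare API.

# Sketch: check the locally installed aimodelshare release (standard library only).
from importlib.metadata import version, PackageNotFoundError

try:
    installed = version("aimodelshare")  # e.g. "0.1.21" or "0.1.62"
except PackageNotFoundError:
    installed = None

print("installed aimodelshare:", installed)
# To stay on the older release until the flagged 0.1.62 has been reviewed,
# pin it in requirements.txt:  aimodelshare==0.1.21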
tests/test_playground.py DELETED
@@ -1,721 +0,0 @@
- from aimodelshare.playground import ModelPlayground, Experiment, Competition
- from aimodelshare.aws import set_credentials, get_aws_token
- import aimodelshare as ai
- from aimodelshare.data_sharing.utils import redo_with_write
-
- from unittest.mock import patch
-
- from sklearn.compose import ColumnTransformer
- from sklearn.pipeline import Pipeline
- from sklearn.impute import SimpleImputer
- from sklearn.preprocessing import StandardScaler, OneHotEncoder
- from sklearn.linear_model import LogisticRegression
-
- import pandas as pd
- import shutil
- import os
-
-
-
- def test_configure_credentials():
-
-     # when testing locally, we can set credentials from file
-     try:
-         set_credentials(credential_file="../../../credentials.txt", type="deploy_model")
-     except Exception as e:
-         print(e)
-
-     try:
-         set_credentials(credential_file="../../credentials.txt", type="deploy_model")
-     except Exception as e:
-         print(e)
-
-     # mock user input
-     inputs = [os.environ.get('USERNAME'),
-               os.environ.get('PASSWORD'),
-               os.environ.get('AWS_ACCESS_KEY_ID'),
-               os.environ.get('AWS_SECRET_ACCESS_KEY'),
-               os.environ.get('AWS_REGION')]
-
-
-     with patch("getpass.getpass", side_effect=inputs):
-         from aimodelshare.aws import configure_credentials
-         configure_credentials()
-
-     # clean up credentials file
-     os.remove("credentials.txt")
-
-
- def test_playground_sklearn():
-
-     # when testing locally, we can set credentials from file
-     try:
-         set_credentials(credential_file="../../../credentials.txt", type="deploy_model")
-     except Exception as e:
-         print(e)
-
-     try:
-         set_credentials(credential_file="../../credentials.txt", type="deploy_model")
-     except Exception as e:
-         print(e)
-
-     # mock user input
-     inputs = [os.environ.get('USERNAME'),
-               os.environ.get('PASSWORD'),
-               os.environ.get('AWS_ACCESS_KEY_ID'),
-               os.environ.get('AWS_SECRET_ACCESS_KEY'),
-               os.environ.get('AWS_REGION')]
-
-     with patch("getpass.getpass", side_effect=inputs):
-         from aimodelshare.aws import configure_credentials
-         configure_credentials()
-
-     # set credentials
-     set_credentials(credential_file="credentials.txt", type="deploy_model")
-     #os.environ["AWS_TOKEN"]=get_aws_token()
-
-     # clean up credentials file
-     os.remove("credentials.txt")
-
-     # Get materials for tutorial
-     X_train, X_test, y_train, y_test, example_data, y_test_labels = ai.import_quickstart_data("titanic")
-
-     # We create the preprocessing pipelines for both numeric and categorical data.
-     numeric_features = ['age', 'fare']
-     numeric_transformer = Pipeline(steps=[
-         ('imputer', SimpleImputer(strategy='median')), #'imputer' names the step
-         ('scaler', StandardScaler())])
-
-     categorical_features = ['embarked', 'sex', 'pclass']
-
-     # Replacing missing values with Modal value and then one-hot encoding.
-     categorical_transformer = Pipeline(steps=[
-         ('imputer', SimpleImputer(strategy='most_frequent')),
-         ('onehot', OneHotEncoder(handle_unknown='ignore'))])
-
-     # Final preprocessor object set up with ColumnTransformer...
-     preprocess = ColumnTransformer(
-         transformers=[
-             ('num', numeric_transformer, numeric_features),
-             ('cat', categorical_transformer, categorical_features)])
-
-     # fit preprocessor to your data
-     preprocess = preprocess.fit(X_train)
-
-     # Write function to transform data with preprocessor
-     # In this case we use sklearn's Column transformer in our preprocessor function
-     def preprocessor(data):
-         preprocessed_data=preprocess.transform(data)
-         return preprocessed_data
-
-     # check shape of X data after preprocessing it using our new function
-     assert preprocessor(X_train).shape == (1047, 10)
-
-     # build model 1
-     model = LogisticRegression(C=10, penalty='l1', solver = 'liblinear')
-     model.fit(preprocessor(X_train), y_train)
-     model.score(preprocessor(X_train), y_train)
-
-     # generate predictions
-     prediction_labels = model.predict(preprocessor(X_test))
-
-     # Instantiate ModelPlayground() Class
-     myplayground=ModelPlayground(input_type="tabular", task_type="classification", private=True)
-
-     # Create Model Playground page
-     myplayground.create(eval_data = y_test_labels)
-
-     # Submit Model to Experiment Leaderboard
-     myplayground.submit_model(model = model,
-                               preprocessor=preprocessor,
-                               prediction_submission=prediction_labels,
-                               input_dict={"description": "", "tags": ""},
-                               submission_type="all")
-
-     # build model 2
-     model_2 = LogisticRegression(C=.01, penalty='l2')
-     model_2.fit(preprocessor(X_train), y_train) # Fitting to the training set.
-     model_2.score(preprocessor(X_train), y_train) # Fit score, 0-1 scale.
-
-     # generate predictions
-     prediction_labels = model_2.predict(preprocessor(X_test))
-
-     # Submit Model 2 to Experiment
-     myplayground.submit_model(model= model_2,
-                               preprocessor=preprocessor,
-                               prediction_submission=prediction_labels,
-                               input_dict={"description": "", "tags": ""},
-                               submission_type="all")
-
-     #submit model through competition
-     mycompetition = ai.playground.Competition(myplayground.playground_url)
-     mycompetition.submit_model(model=model_2,
-                                preprocessor=preprocessor,
-                                prediction_submission=prediction_labels,
-                                input_dict={"description": "", "tags": ""}
-                                )
-
-     #submit model through experiment
-     myexperiment = ai.playground.Experiment(myplayground.playground_url)
-     myexperiment.submit_model(model=model_2,
-                               preprocessor=preprocessor,
-                               prediction_submission=prediction_labels,
-                               input_dict={"description": "", "tags": ""}
-                               )
-
-     # Check Competition Leaderboard
-     data = myplayground.get_leaderboard()
-     myplayground.stylize_leaderboard(data)
-     assert isinstance(data, pd.DataFrame)
-
-     # Compare two or more models
-     data = myplayground.compare_models([1,2], verbose=1)
-     myplayground.stylize_compare(data)
-     assert isinstance(data, (pd.DataFrame, dict))
-
-     # Check structure of evaluation data
-     data = myplayground.inspect_eval_data()
-     assert isinstance(data, dict)
-
-     # deploy model
-     myplayground.deploy_model(model_version=1, example_data=example_data, y_train=y_train)
-
-     # update example data
-     myplayground.update_example_data(example_data)
-
-     # swap out runtime model
-     myplayground.update_runtime_model(model_version=1)
-
-     # delete
-     myplayground.delete_deployment(confirmation=False)
-
-     # local cleanup
-     shutil.rmtree("titanic_competition_data", onerror=redo_with_write)
-     shutil.rmtree("titanic_quickstart", onerror=redo_with_write)
-
-
-
- def test_playground_keras():
-
-     # when testing locally, we can set credentials from file
-     try:
-         set_credentials(credential_file="../../../credentials.txt", type="deploy_model")
-     except Exception as e:
-         print(e)
-
-     try:
-         set_credentials(credential_file="../../credentials.txt", type="deploy_model")
-     except Exception as e:
-         print(e)
-
-     # mock user input
-     inputs = [os.environ.get('USERNAME'),
-               os.environ.get('PASSWORD'),
-               os.environ.get('AWS_ACCESS_KEY_ID'),
-               os.environ.get('AWS_SECRET_ACCESS_KEY'),
-               os.environ.get('AWS_REGION')]
-
-     with patch("getpass.getpass", side_effect=inputs):
-         from aimodelshare.aws import configure_credentials
-         configure_credentials()
-
-     # set credentials
-     set_credentials(credential_file="credentials.txt", type="deploy_model")
-     # os.environ["AWS_TOKEN"]=get_aws_token()
-
-     # clean up credentials file
-     os.remove("credentials.txt")
-
-     # # Download flower image data and and pretrained Keras models
-     from aimodelshare.data_sharing.download_data import import_quickstart_data
-     keras_model, y_train_labels = import_quickstart_data("flowers")
-     keras_model_2, y_test_labels = import_quickstart_data("flowers", "competition")
-
-     # Here is a pre-designed preprocessor, but you could also build your own to prepare the data differently
-     def preprocessor(image_filepath, shape=(192, 192)):
-         """
-         This function preprocesses reads in images, resizes them to a fixed shape and
-         min/max transforms them before converting feature values to float32 numeric values
-         required by onnx files.
-
-         params:
-             image_filepath
-                 full filepath of a particular image
-
-         returns:
-             X
-                 numpy array of preprocessed image data
-         """
-
-         import cv2
-         import numpy as np
-
-         "Resize a color image and min/max transform the image"
-         img = cv2.imread(image_filepath) # Read in image from filepath.
-         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # cv2 reads in images in order of blue green and red, we reverse the order for ML.
-         img = cv2.resize(img, shape) # Change height and width of image.
-         img = img / 255.0 # Min-max transform.
-
-
-         # Resize all the images...
-         X = np.array(img)
-         X = np.expand_dims(X, axis=0) # Expand dims to add "1" to object shape [1, h, w, channels] for keras model.
-         X = np.array(X, dtype=np.float32) # Final shape for onnx runtime.
-         return X
-
-     # Preprocess X_test image data to generate predictions from models
-     import numpy as np
-
-     # Generate filenames
-     file_names = [('flower_competition_data/test_images/' + str(i) + '.jpg') for i in range(1, 735)]
-
-     # Apply preprocessor to image data
-     preprocessed_image_data = [preprocessor(x) for x in file_names]
-
-     # Create single X_test array from preprocessed images
-     X_test = np.vstack(preprocessed_image_data)
-
-     # One-hot encode y_train labels (y_train.columns used to generate prediction labels below)
-     import pandas as pd
-     y_train = pd.get_dummies(y_train_labels)
-
-     # Generate predicted y values
-     prediction_column_index=keras_model.predict(X_test).argmax(axis=1)
-
-     # Extract correct prediction labels
-     prediction_labels = [y_train.columns[i] for i in prediction_column_index]
-
-     # Instantiate Model Playground object
-     from aimodelshare.playground import ModelPlayground
-     myplayground=ModelPlayground(input_type="image", task_type="classification", private=False)
-     # Create Model Playground Page on modelshare.ai website
-     myplayground.create(eval_data=y_test_labels)
-
-     # Submit Model to Experiment Leaderboard
-     myplayground.submit_model(model=keras_model,
-                               preprocessor=preprocessor,
-                               prediction_submission=prediction_labels,
-                               input_dict={"description": "", "tags": ""},
-                               submission_type="all")
-
-     # Deploy model by version number
-     myplayground.deploy_model(model_version=1, example_data="quickstart_materials/example_data", y_train=y_train)
-
-     # example url from deployed playground: apiurl= "https://123456.execute-api.us-east-1.amazonaws.com/prod/m
-     apiurl=myplayground.playground_url
-
-     # Submit Model 2
-     # Generate predicted y values (Model 2)
-     prediction_column_index=keras_model_2.predict(X_test).argmax(axis=1)
-
-     # extract correct prediction labels (Model 2)
-     prediction_labels = [y_train.columns[i] for i in prediction_column_index]
-
-     # Submit Model 2 to Experiment Leaderboard
-     myplayground.submit_model(model=keras_model_2,
-                               preprocessor=preprocessor,
-                               prediction_submission=prediction_labels,
-                               input_dict={"description": "", "tags": ""},
-                               submission_type="all")
-
-     #submit model through competition
-     mycompetition = ai.playground.Competition(myplayground.playground_url)
-     mycompetition.submit_model(model=keras_model_2,
-                                preprocessor=preprocessor,
-                                prediction_submission=prediction_labels,
-                                input_dict={"description": "", "tags": ""}
-                                )
-
-     #submit model through experiment
-     myexperiment = ai.playground.Experiment(myplayground.playground_url)
-     myexperiment.submit_model(model=keras_model_2,
-                               preprocessor=preprocessor,
-                               prediction_submission=prediction_labels,
-                               input_dict={"description": "", "tags": ""}
-                               )
-
-     # Check experiment leaderboard
-     data = myplayground.get_leaderboard()
-     myplayground.stylize_leaderboard(data)
-     assert isinstance(data, pd.DataFrame)
-
-     # Compare two or more models
-     data = myplayground.compare_models([1,2], verbose=1)
-     myplayground.stylize_compare(data)
-     assert isinstance(data, (pd.DataFrame, dict))
-
-     # Check structure of evaluation data
-     data = myplayground.inspect_eval_data()
-     assert isinstance(data, dict)
-
-     # Update runtime model
-     myplayground.update_runtime_model(model_version=2)
-
-     # delete
-     myplayground.delete_deployment(confirmation=False)
-
-     # local cleanup
-     shutil.rmtree("flower_competition_data", onerror=redo_with_write)
-     shutil.rmtree("quickstart_materials", onerror=redo_with_write)
-     shutil.rmtree("quickstart_flowers_competition", onerror=redo_with_write)
-
-
- def test_playground_pytorch():
-
-     # when testing locally, we can set credentials from file
-     try:
-         set_credentials(credential_file="../../../credentials.txt", type="deploy_model")
-     except Exception as e:
-         print(e)
-
-     try:
-         set_credentials(credential_file="../../credentials.txt", type="deploy_model")
-     except Exception as e:
-         print(e)
-
-     # mock user input
-     inputs = [os.environ.get('USERNAME'),
-               os.environ.get('PASSWORD'),
-               os.environ.get('AWS_ACCESS_KEY_ID'),
-               os.environ.get('AWS_SECRET_ACCESS_KEY'),
-               os.environ.get('AWS_REGION')]
-
-     with patch("getpass.getpass", side_effect=inputs):
-         from aimodelshare.aws import configure_credentials
-         configure_credentials()
-
-     # set credentials
-     set_credentials(credential_file="credentials.txt", type="deploy_model")
-     # os.environ["AWS_TOKEN"]=get_aws_token()
-
-     # clean up credentials file
-     os.remove("credentials.txt")
-
-     # # Download flower image data # Download flower image file (jpg) dataset
-     import aimodelshare as ai
-     ai.download_data("public.ecr.aws/y2e2a1d6/flower-competition-data-repository:latest")
-
-     # Extract filepaths to use to import and preprocess image files...
-     base_path = 'flower-competition-data/train_images'
-     categories = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']
-
-     # Load file paths to fnames list object...
-     fnames = []
-
-     for category in categories:
-         flower_folder = os.path.join(base_path, category)
-         file_names = os.listdir(flower_folder)
-         full_path = [os.path.join(flower_folder, file_name) for file_name in file_names]
-         fnames.append(full_path)
-
-     # Here is a pre-designed preprocessor, but you could also build your own to prepare the data differently
-
-     def preprocessor(data, shape=(128, 128)):
-         """
-         This function preprocesses reads in images, resizes them to a fixed shape and
-         min/max transforms them before converting feature values to float32 numeric values
-         required by onnx files.
-
-         params:
-             data
-                 list of unprocessed images
-
-         returns:
-             X
-                 numpy array of preprocessed image data
-
-         """
-
-         import cv2
-         import numpy as np
-
-         "Resize a color image and min/max transform the image"
-         img = cv2.imread(data) # Read in image from filepath.
-         img = cv2.cvtColor(img,
-                            cv2.COLOR_BGR2RGB) # cv2 reads in images in order of blue green and red, we reverse the order for ML.
-         img = cv2.resize(img, shape) # Change height and width of image.
-         img = img / 255.0 # Min-max transform.
-
-         # Resize all the images...
-         X = np.array(img)
-         X = np.expand_dims(X, axis=0) # Expand dims to add "1" to object shape [1, h, w, channels].
-         X = np.array(X, dtype=np.float32) # Final shape for onnx runtime.
-
-         # transpose image to pytorch format
-         X = np.transpose(X, (0, 3, 1, 2))
-
-         return X
-
-     # Import image, load to array of shape height, width, channels, then min/max transform...
-
-     # Read in all images from filenames...
-     preprocessed_image_data = [preprocessor(x) for x in fnames[0] + fnames[1] + fnames[2] + fnames[3] + fnames[4]]
-
-     # models require object to be an array rather than a list. (vstack converts above list to array object.)
-     import numpy as np
-     X = np.vstack(
-         preprocessed_image_data) # Assigning to X to highlight that this represents feature input data for our model.
-
-     # Create y training label data made up of correctly ordered labels from file folders...
-     from itertools import repeat
-
-     daisy = list(repeat("daisy", 507)) # i.e.: 507 filenames in daisy folder
-     dandelion = list(repeat("dandelion", 718))
-     roses = list(repeat("roses", 513))
-     sunflowers = list(repeat("sunflowers", 559))
-     tulips = list(repeat("tulips", 639))
-
-     # Combine into single list of y labels...
-     y_labels = daisy + dandelion + roses + sunflowers + tulips
-
-     # Check length, same as X above...
-     len(y_labels)
-
-     # get numerical representation of y labels
-     import pandas as pd
-     y_labels_num = pd.DataFrame(y_labels)[0].map(
-         {'daisy': 4, 'dandelion': 1, # `data_paths` has 'daisy', 'dandelion', 'sunflowers', 'roses', 'tulips'...
-          'sunflowers': 2, 'roses': 3, 'tulips': 0}) # ...but `image_paths` has 'tulips' first, and 'daisy' last.
-
-     y_labels_num = list(y_labels_num)
-
-     # train_test_split resized images...
-     from sklearn.model_selection import train_test_split
-
-     X_train, X_test, y_train, y_test = train_test_split(X, y_labels_num,
-                                                         stratify=y_labels_num,
-                                                         test_size=0.20,
-                                                         random_state=1987)
-
-     import torch
-
-     # Get cpu or gpu device for training.
-     device = "cuda" if torch.cuda.is_available() else "cpu"
-     print(f"Using {device} device")
-
-     from torch.utils.data import DataLoader, TensorDataset
-
-     # prepare datasets for pytorch dataloader
-     tensor_X_train = torch.Tensor(X_train)
-     tensor_y_train = torch.tensor(y_train, dtype=torch.long)
-     train_ds = TensorDataset(tensor_X_train, tensor_y_train)
-
-     tensor_X_test = torch.Tensor(X_test)
-     tensor_y_test = torch.tensor(y_test, dtype=torch.long)
-     test_ds = TensorDataset(tensor_X_test, tensor_y_test)
-
-     # set up dataloaders
-     batch_size = 50
-     train_dataloader = DataLoader(train_ds, batch_size=batch_size, shuffle=False)
-     test_dataloader = DataLoader(test_ds, batch_size=batch_size, shuffle=False)
-
-     from torch import nn
-
-     # Define pytorch model
-     class NeuralNetwork(nn.Module):
-         def __init__(self):
-             super(NeuralNetwork, self).__init__()
-             self.flatten = nn.Flatten()
-             self.linear_relu_stack = nn.Sequential(
-                 nn.Linear(128 * 128 * 3, 512),
-                 nn.ReLU(),
-                 nn.Linear(512, 512),
-                 nn.ReLU(),
-                 nn.Linear(512, 5)
-             )
-
-         def forward(self, x):
-             x = self.flatten(x)
-             logits = self.linear_relu_stack(x)
-             return logits
-
-     model = NeuralNetwork().to(device)
-     print(model)
-
-     # set up loss function and optimizer
-     loss_fn = nn.CrossEntropyLoss()
-     optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
-
-     # define training function
-     def train(dataloader, model, loss_fn, optimizer):
-         size = len(dataloader.dataset)
-         model.train()
-         for batch, (X, y) in enumerate(dataloader):
-             X, y = X.to(device), y.to(device)
-
-             # Compute prediction error
-             pred = model(X)
-             loss = loss_fn(pred, y)
-
-             # Backpropagation
-             optimizer.zero_grad()
-             loss.backward()
-             optimizer.step()
-
-             if batch % 100 == 0:
-                 loss, current = loss.item(), batch * len(X)
-                 print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
-
-     # define testing function
-     def test(dataloader, model, loss_fn):
-         size = len(dataloader.dataset)
-         num_batches = len(dataloader)
-         model.eval()
-         test_loss, correct = 0, 0
-         with torch.no_grad():
-             for X, y in dataloader:
-                 X, y = X.to(device), y.to(device)
-                 pred = model(X)
-                 test_loss += loss_fn(pred, y).item()
-                 correct += (pred.argmax(1) == y).type(torch.float).sum().item()
-         test_loss /= num_batches
-         correct /= size
-         print(f"Test Error: \n Accuracy: {(100 * correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
-
-     epochs = 2
-     for t in range(epochs):
-         print(f"Epoch {t + 1}\n-------------------------------")
-         train(train_dataloader, model, loss_fn, optimizer)
-         test(test_dataloader, model, loss_fn)
-     print("Done!")
-
-     # -- Generate predicted y values (Model 1)
-     # Note: returns the predicted column index location for classification models
-     if torch.cuda.is_available():
-         prediction_column_index = model(tensor_X_test.cuda()).argmax(axis=1)
-     else:
-         prediction_column_index = model(tensor_X_test).argmax(axis=1)
-
-     # extract correct prediction labels
-     prediction_labels = [['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips'][i] for i in prediction_column_index]
-
-     # Create labels for y_test
-     y_test_labels = [['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips'][i] for i in y_test]
-
-     # Create labels for y_train
-     y_train_labels = [['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips'][i] for i in y_train]
-
-     # Instantiate Model Playground object
-     from aimodelshare.playground import ModelPlayground
-     myplayground = ModelPlayground(input_type="image", task_type="classification", private=False)
-
-     # Create Model Playground Page on modelshare.ai website
-     myplayground.create(eval_data=y_test_labels)
-
-     if torch.cuda.is_available():
-         example_input = torch.randn(1, 3, 128, 128, requires_grad=True).cuda()
-     else:
-         example_input = torch.randn(1, 3, 128, 128, requires_grad=True)
-
-
-     # Submit Model to Experiment Leaderboard
-     myplayground.submit_model(model=model,
-                               preprocessor=preprocessor,
-                               prediction_submission=prediction_labels,
-                               input_dict={"description": "", "tags": ""},
-                               submission_type="all",
-                               model_input = example_input)
-
-
-     # Create example data folder to provide on model playground page
-     # for users to test prediction REST API
-     import shutil
-     os.mkdir('example_data')
-     example_images = ["flower-competition-data/train_images/daisy/100080576_f52e8ee070_n.jpg",
-                       "flower-competition-data/train_images/dandelion/10200780773_c6051a7d71_n.jpg",
-                       "flower-competition-data/train_images/roses/10503217854_e66a804309.jpg",
-                       "flower-competition-data/train_images/sunflowers/1022552002_2b93faf9e7_n.jpg",
-                       "flower-competition-data/train_images/tulips/100930342_92e8746431_n.jpg"]
-
-     for image in example_images:
-         shutil.copy(image, 'example_data')
-
-     # Deploy model by version number
-     myplayground.deploy_model(model_version=1, example_data="example_data", y_train=y_train)
-
-     # example url from deployed playground: apiurl= "https://123456.execute-api.us-east-1.amazonaws.com/prod/m
-     apiurl = myplayground.playground_url
-
-     # Submit Model 2
-     # Define model
-     class NeuralNetwork(nn.Module):
-         def __init__(self):
-             super(NeuralNetwork, self).__init__()
-             self.flatten = nn.Flatten()
-             self.linear_relu_stack = nn.Sequential(
-                 nn.Linear(128 * 128 * 3, 512),
-                 nn.ReLU(),
-                 nn.Linear(512, 512),
-                 nn.ReLU(),
-                 nn.Linear(512, 256),
-                 nn.ReLU(),
-                 nn.Linear(256, 5)
-             )
-
-         def forward(self, x):
-             x = self.flatten(x)
-             logits = self.linear_relu_stack(x)
-             return logits
-
-     model2 = NeuralNetwork().to(device)
-     print(model2)
-
-     # set up loss function and optimizer
-     loss_fn = nn.CrossEntropyLoss()
-     optimizer = torch.optim.SGD(model2.parameters(), lr=1e-3)
-
-     # train model
-     epochs = 2
-     for t in range(epochs):
-         print(f"Epoch {t + 1}\n-------------------------------")
-         train(train_dataloader, model2, loss_fn, optimizer)
-         test(test_dataloader, model2, loss_fn)
-     print("Done!")
-
-     # Submit Model 2 to Experiment Leaderboard
-     myplayground.submit_model(model=model2,
-                               preprocessor=preprocessor,
-                               prediction_submission=prediction_labels,
-                               input_dict={"description": "", "tags": ""},
-                               submission_type="all",
-                               model_input = example_input)
-
-     # submit model through competition
-     mycompetition = ai.playground.Competition(myplayground.playground_url)
-     mycompetition.submit_model(model=model2,
-                                preprocessor=preprocessor,
-                                prediction_submission=prediction_labels,
-                                input_dict={"description": "", "tags": ""},
-                                model_input=example_input)
-
-     # submit model through experiment
-     myexperiment = ai.playground.Experiment(myplayground.playground_url)
-     myexperiment.submit_model(model=model2,
-                               preprocessor=preprocessor,
-                               prediction_submission=prediction_labels,
-                               input_dict={"description": "", "tags": ""},
-                               model_input=example_input)
-
-     # Check experiment leaderboard
-     data = myplayground.get_leaderboard()
-     myplayground.stylize_leaderboard(data)
-     assert isinstance(data, pd.DataFrame)
-
-     # Compare two or more models
-     data = myplayground.compare_models([1, 2], verbose=1)
-     myplayground.stylize_compare(data)
-     assert isinstance(data, (pd.DataFrame, dict))
-
-     # Check structure of evaluation data
-     data = myplayground.inspect_eval_data()
-     assert isinstance(data, dict)
-
-     # Update runtime model
-     myplayground.update_runtime_model(model_version=2)
-
-     # delete
-     myplayground.delete_deployment(confirmation=False)
-
-     # local cleanup
-     shutil.rmtree("flower-competition-data", onerror=redo_with_write)
-     shutil.rmtree("example_data", onerror=redo_with_write)