@epfml/discojs 1.0.0 → 2.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +28 -8
- package/dist/{async_buffer.d.ts → core/async_buffer.d.ts} +3 -3
- package/dist/{async_buffer.js → core/async_buffer.js} +5 -6
- package/dist/{async_informant.d.ts → core/async_informant.d.ts} +0 -0
- package/dist/{async_informant.js → core/async_informant.js} +0 -0
- package/dist/{client → core/client}/base.d.ts +4 -7
- package/dist/{client → core/client}/base.js +3 -2
- package/dist/core/client/decentralized/base.d.ts +32 -0
- package/dist/core/client/decentralized/base.js +212 -0
- package/dist/core/client/decentralized/clear_text.d.ts +14 -0
- package/dist/core/client/decentralized/clear_text.js +96 -0
- package/dist/{client → core/client}/decentralized/index.d.ts +0 -0
- package/dist/{client → core/client}/decentralized/index.js +0 -0
- package/dist/core/client/decentralized/messages.d.ts +41 -0
- package/dist/core/client/decentralized/messages.js +54 -0
- package/dist/core/client/decentralized/peer.d.ts +26 -0
- package/dist/core/client/decentralized/peer.js +210 -0
- package/dist/core/client/decentralized/peer_pool.d.ts +14 -0
- package/dist/core/client/decentralized/peer_pool.js +92 -0
- package/dist/core/client/decentralized/sec_agg.d.ts +22 -0
- package/dist/core/client/decentralized/sec_agg.js +190 -0
- package/dist/core/client/decentralized/secret_shares.d.ts +3 -0
- package/dist/core/client/decentralized/secret_shares.js +39 -0
- package/dist/core/client/decentralized/types.d.ts +2 -0
- package/dist/core/client/decentralized/types.js +7 -0
- package/dist/core/client/event_connection.d.ts +37 -0
- package/dist/core/client/event_connection.js +158 -0
- package/dist/core/client/federated/client.d.ts +37 -0
- package/dist/core/client/federated/client.js +273 -0
- package/dist/core/client/federated/index.d.ts +2 -0
- package/dist/core/client/federated/index.js +7 -0
- package/dist/core/client/federated/messages.d.ts +38 -0
- package/dist/core/client/federated/messages.js +25 -0
- package/dist/{client → core/client}/index.d.ts +2 -1
- package/dist/{client → core/client}/index.js +3 -3
- package/dist/{client → core/client}/local.d.ts +2 -2
- package/dist/{client → core/client}/local.js +0 -0
- package/dist/core/client/messages.d.ts +28 -0
- package/dist/core/client/messages.js +33 -0
- package/dist/core/client/utils.d.ts +2 -0
- package/dist/core/client/utils.js +19 -0
- package/dist/core/dataset/data/data.d.ts +11 -0
- package/dist/core/dataset/data/data.js +20 -0
- package/dist/core/dataset/data/data_split.d.ts +5 -0
- package/dist/{client/decentralized/types.js → core/dataset/data/data_split.js} +0 -0
- package/dist/core/dataset/data/image_data.d.ts +8 -0
- package/dist/core/dataset/data/image_data.js +64 -0
- package/dist/core/dataset/data/index.d.ts +5 -0
- package/dist/core/dataset/data/index.js +11 -0
- package/dist/core/dataset/data/preprocessing.d.ts +13 -0
- package/dist/core/dataset/data/preprocessing.js +33 -0
- package/dist/core/dataset/data/tabular_data.d.ts +8 -0
- package/dist/core/dataset/data/tabular_data.js +40 -0
- package/dist/{dataset → core/dataset}/data_loader/data_loader.d.ts +4 -11
- package/dist/{dataset → core/dataset}/data_loader/data_loader.js +0 -0
- package/dist/core/dataset/data_loader/image_loader.d.ts +17 -0
- package/dist/core/dataset/data_loader/image_loader.js +141 -0
- package/dist/core/dataset/data_loader/index.d.ts +3 -0
- package/dist/core/dataset/data_loader/index.js +9 -0
- package/dist/core/dataset/data_loader/tabular_loader.d.ts +29 -0
- package/dist/core/dataset/data_loader/tabular_loader.js +101 -0
- package/dist/core/dataset/dataset.d.ts +2 -0
- package/dist/{task/training_information.js → core/dataset/dataset.js} +0 -0
- package/dist/{dataset → core/dataset}/dataset_builder.d.ts +5 -5
- package/dist/{dataset → core/dataset}/dataset_builder.js +14 -10
- package/dist/core/dataset/index.d.ts +4 -0
- package/dist/core/dataset/index.js +14 -0
- package/dist/core/default_tasks/cifar10.d.ts +2 -0
- package/dist/core/default_tasks/cifar10.js +68 -0
- package/dist/core/default_tasks/geotags.d.ts +2 -0
- package/dist/core/default_tasks/geotags.js +69 -0
- package/dist/core/default_tasks/index.d.ts +6 -0
- package/dist/core/default_tasks/index.js +15 -0
- package/dist/core/default_tasks/lus_covid.d.ts +2 -0
- package/dist/core/default_tasks/lus_covid.js +96 -0
- package/dist/core/default_tasks/mnist.d.ts +2 -0
- package/dist/core/default_tasks/mnist.js +69 -0
- package/dist/core/default_tasks/simple_face.d.ts +2 -0
- package/dist/core/default_tasks/simple_face.js +53 -0
- package/dist/core/default_tasks/titanic.d.ts +2 -0
- package/dist/core/default_tasks/titanic.js +97 -0
- package/dist/core/index.d.ts +18 -0
- package/dist/core/index.js +39 -0
- package/dist/{informant → core/informant}/graph_informant.d.ts +0 -0
- package/dist/{informant → core/informant}/graph_informant.js +0 -0
- package/dist/{informant → core/informant}/index.d.ts +0 -0
- package/dist/{informant → core/informant}/index.js +0 -0
- package/dist/{informant → core/informant}/training_informant/base.d.ts +3 -3
- package/dist/{informant → core/informant}/training_informant/base.js +3 -2
- package/dist/{informant → core/informant}/training_informant/decentralized.d.ts +0 -0
- package/dist/{informant → core/informant}/training_informant/decentralized.js +0 -0
- package/dist/{informant → core/informant}/training_informant/federated.d.ts +0 -0
- package/dist/{informant → core/informant}/training_informant/federated.js +0 -0
- package/dist/{informant → core/informant}/training_informant/index.d.ts +0 -0
- package/dist/{informant → core/informant}/training_informant/index.js +0 -0
- package/dist/{informant → core/informant}/training_informant/local.d.ts +2 -2
- package/dist/{informant → core/informant}/training_informant/local.js +2 -2
- package/dist/{logging → core/logging}/console_logger.d.ts +0 -0
- package/dist/{logging → core/logging}/console_logger.js +0 -0
- package/dist/{logging → core/logging}/index.d.ts +0 -0
- package/dist/{logging → core/logging}/index.js +0 -0
- package/dist/{logging → core/logging}/logger.d.ts +0 -0
- package/dist/{logging → core/logging}/logger.js +0 -0
- package/dist/{logging → core/logging}/trainer_logger.d.ts +0 -0
- package/dist/{logging → core/logging}/trainer_logger.js +0 -0
- package/dist/{memory → core/memory}/base.d.ts +2 -2
- package/dist/{memory → core/memory}/base.js +0 -0
- package/dist/{memory → core/memory}/empty.d.ts +0 -0
- package/dist/{memory → core/memory}/empty.js +0 -0
- package/dist/core/memory/index.d.ts +3 -0
- package/dist/core/memory/index.js +9 -0
- package/dist/{memory → core/memory}/model_type.d.ts +0 -0
- package/dist/{memory → core/memory}/model_type.js +0 -0
- package/dist/{privacy.d.ts → core/privacy.d.ts} +2 -3
- package/dist/{privacy.js → core/privacy.js} +3 -16
- package/dist/{serialization → core/serialization}/index.d.ts +0 -0
- package/dist/{serialization → core/serialization}/index.js +0 -0
- package/dist/{serialization → core/serialization}/model.d.ts +0 -0
- package/dist/{serialization → core/serialization}/model.js +0 -0
- package/dist/core/serialization/weights.d.ts +5 -0
- package/dist/{serialization → core/serialization}/weights.js +11 -9
- package/dist/{task → core/task}/data_example.d.ts +0 -0
- package/dist/{task → core/task}/data_example.js +0 -0
- package/dist/core/task/digest.d.ts +5 -0
- package/dist/core/task/digest.js +18 -0
- package/dist/{task → core/task}/display_information.d.ts +5 -5
- package/dist/{task → core/task}/display_information.js +5 -10
- package/dist/{task → core/task}/index.d.ts +3 -0
- package/dist/core/task/index.js +15 -0
- package/dist/core/task/model_compile_data.d.ts +6 -0
- package/dist/core/task/model_compile_data.js +22 -0
- package/dist/{task → core/task}/summary.d.ts +0 -0
- package/dist/{task → core/task}/summary.js +0 -4
- package/dist/{task → core/task}/task.d.ts +4 -2
- package/dist/{task → core/task}/task.js +10 -7
- package/dist/core/task/task_handler.d.ts +5 -0
- package/dist/core/task/task_handler.js +53 -0
- package/dist/core/task/task_provider.d.ts +6 -0
- package/dist/core/task/task_provider.js +13 -0
- package/dist/{task → core/task}/training_information.d.ts +10 -14
- package/dist/core/task/training_information.js +66 -0
- package/dist/core/training/disco.d.ts +23 -0
- package/dist/core/training/disco.js +130 -0
- package/dist/{training → core/training}/index.d.ts +0 -0
- package/dist/{training → core/training}/index.js +0 -0
- package/dist/{training → core/training}/trainer/distributed_trainer.d.ts +1 -2
- package/dist/{training → core/training}/trainer/distributed_trainer.js +6 -5
- package/dist/{training → core/training}/trainer/local_trainer.d.ts +2 -2
- package/dist/{training → core/training}/trainer/local_trainer.js +0 -0
- package/dist/{training → core/training}/trainer/round_tracker.d.ts +0 -0
- package/dist/{training → core/training}/trainer/round_tracker.js +0 -0
- package/dist/{training → core/training}/trainer/trainer.d.ts +1 -2
- package/dist/{training → core/training}/trainer/trainer.js +2 -2
- package/dist/{training → core/training}/trainer/trainer_builder.d.ts +0 -0
- package/dist/{training → core/training}/trainer/trainer_builder.js +0 -0
- package/dist/core/training/training_schemes.d.ts +5 -0
- package/dist/{training → core/training}/training_schemes.js +2 -2
- package/dist/{types.d.ts → core/types.d.ts} +0 -0
- package/dist/{types.js → core/types.js} +0 -0
- package/dist/{validation → core/validation}/index.d.ts +0 -0
- package/dist/{validation → core/validation}/index.js +0 -0
- package/dist/{validation → core/validation}/validator.d.ts +5 -8
- package/dist/{validation → core/validation}/validator.js +9 -11
- package/dist/core/weights/aggregation.d.ts +7 -0
- package/dist/core/weights/aggregation.js +72 -0
- package/dist/core/weights/index.d.ts +2 -0
- package/dist/core/weights/index.js +7 -0
- package/dist/core/weights/weights_container.d.ts +19 -0
- package/dist/core/weights/weights_container.js +64 -0
- package/dist/dataset/data_loader/image_loader.d.ts +3 -15
- package/dist/dataset/data_loader/image_loader.js +12 -125
- package/dist/dataset/data_loader/index.d.ts +2 -3
- package/dist/dataset/data_loader/index.js +3 -5
- package/dist/dataset/data_loader/tabular_loader.d.ts +3 -28
- package/dist/dataset/data_loader/tabular_loader.js +11 -92
- package/dist/imports.d.ts +2 -0
- package/dist/imports.js +7 -0
- package/dist/index.d.ts +2 -19
- package/dist/index.js +3 -39
- package/dist/memory/index.d.ts +1 -3
- package/dist/memory/index.js +3 -7
- package/dist/memory/memory.d.ts +26 -0
- package/dist/memory/memory.js +160 -0
- package/package.json +13 -26
- package/dist/aggregation.d.ts +0 -5
- package/dist/aggregation.js +0 -33
- package/dist/client/decentralized/base.d.ts +0 -43
- package/dist/client/decentralized/base.js +0 -243
- package/dist/client/decentralized/clear_text.d.ts +0 -13
- package/dist/client/decentralized/clear_text.js +0 -78
- package/dist/client/decentralized/messages.d.ts +0 -37
- package/dist/client/decentralized/messages.js +0 -15
- package/dist/client/decentralized/sec_agg.d.ts +0 -18
- package/dist/client/decentralized/sec_agg.js +0 -169
- package/dist/client/decentralized/secret_shares.d.ts +0 -5
- package/dist/client/decentralized/secret_shares.js +0 -58
- package/dist/client/decentralized/types.d.ts +0 -1
- package/dist/client/federated.d.ts +0 -30
- package/dist/client/federated.js +0 -218
- package/dist/dataset/index.d.ts +0 -2
- package/dist/dataset/index.js +0 -7
- package/dist/model_actor.d.ts +0 -16
- package/dist/model_actor.js +0 -20
- package/dist/serialization/weights.d.ts +0 -5
- package/dist/task/index.js +0 -8
- package/dist/task/model_compile_data.d.ts +0 -6
- package/dist/task/model_compile_data.js +0 -12
- package/dist/tasks/cifar10.d.ts +0 -4
- package/dist/tasks/cifar10.js +0 -76
- package/dist/tasks/index.d.ts +0 -5
- package/dist/tasks/index.js +0 -9
- package/dist/tasks/lus_covid.d.ts +0 -4
- package/dist/tasks/lus_covid.js +0 -85
- package/dist/tasks/mnist.d.ts +0 -4
- package/dist/tasks/mnist.js +0 -58
- package/dist/tasks/simple_face.d.ts +0 -4
- package/dist/tasks/simple_face.js +0 -84
- package/dist/tasks/titanic.d.ts +0 -4
- package/dist/tasks/titanic.js +0 -88
- package/dist/tfjs.d.ts +0 -2
- package/dist/tfjs.js +0 -6
- package/dist/training/disco.d.ts +0 -14
- package/dist/training/disco.js +0 -70
- package/dist/training/training_schemes.d.ts +0 -5
|
@@ -12,7 +12,7 @@ var DatasetBuilder = /** @class */ (function () {
|
|
|
12
12
|
}
|
|
13
13
|
DatasetBuilder.prototype.addFiles = function (sources, label) {
|
|
14
14
|
if (this.built) {
|
|
15
|
-
|
|
15
|
+
this.resetBuiltState();
|
|
16
16
|
}
|
|
17
17
|
if (label === undefined) {
|
|
18
18
|
this.sources = this.sources.concat(sources);
|
|
@@ -29,7 +29,7 @@ var DatasetBuilder = /** @class */ (function () {
|
|
|
29
29
|
};
|
|
30
30
|
DatasetBuilder.prototype.clearFiles = function (label) {
|
|
31
31
|
if (this.built) {
|
|
32
|
-
|
|
32
|
+
this.resetBuiltState();
|
|
33
33
|
}
|
|
34
34
|
if (label === undefined) {
|
|
35
35
|
this.sources = [];
|
|
@@ -38,6 +38,11 @@ var DatasetBuilder = /** @class */ (function () {
|
|
|
38
38
|
this.labelledSources.delete(label);
|
|
39
39
|
}
|
|
40
40
|
};
|
|
41
|
+
// If files are added or removed, then this should be called since the latest
|
|
42
|
+
// version of the dataset_builder has not yet been built.
|
|
43
|
+
DatasetBuilder.prototype.resetBuiltState = function () {
|
|
44
|
+
this.built = false;
|
|
45
|
+
};
|
|
41
46
|
DatasetBuilder.prototype.getLabels = function () {
|
|
42
47
|
// We need to duplicate the labels as we need one for each soure.
|
|
43
48
|
// Say for label A we have sources [img1, img2, img3], then we
|
|
@@ -50,29 +55,28 @@ var DatasetBuilder = /** @class */ (function () {
|
|
|
50
55
|
return labels.flat();
|
|
51
56
|
};
|
|
52
57
|
DatasetBuilder.prototype.build = function (config) {
|
|
53
|
-
var _a, _b;
|
|
54
58
|
return (0, tslib_1.__awaiter)(this, void 0, void 0, function () {
|
|
55
59
|
var dataTuple, defaultConfig, defaultConfig, sources;
|
|
56
|
-
return (0, tslib_1.__generator)(this, function (
|
|
57
|
-
switch (
|
|
60
|
+
return (0, tslib_1.__generator)(this, function (_a) {
|
|
61
|
+
switch (_a.label) {
|
|
58
62
|
case 0:
|
|
59
63
|
// Require that at leat one source collection is non-empty, but not both
|
|
60
64
|
if ((this.sources.length > 0) === (this.labelledSources.size > 0)) {
|
|
61
|
-
throw new Error('
|
|
65
|
+
throw new Error('Please provide dataset input files');
|
|
62
66
|
}
|
|
63
67
|
if (!(this.sources.length > 0)) return [3 /*break*/, 2];
|
|
64
|
-
defaultConfig = (0, tslib_1.__assign)({ features:
|
|
68
|
+
defaultConfig = (0, tslib_1.__assign)({ features: this.task.trainingInformation.inputColumns, labels: this.task.trainingInformation.outputColumns }, config);
|
|
65
69
|
return [4 /*yield*/, this.dataLoader.loadAll(this.sources, defaultConfig)];
|
|
66
70
|
case 1:
|
|
67
|
-
dataTuple =
|
|
71
|
+
dataTuple = _a.sent();
|
|
68
72
|
return [3 /*break*/, 4];
|
|
69
73
|
case 2:
|
|
70
74
|
defaultConfig = (0, tslib_1.__assign)({ labels: this.getLabels() }, config);
|
|
71
75
|
sources = Array.from(this.labelledSources.values()).flat();
|
|
72
76
|
return [4 /*yield*/, this.dataLoader.loadAll(sources, defaultConfig)];
|
|
73
77
|
case 3:
|
|
74
|
-
dataTuple =
|
|
75
|
-
|
|
78
|
+
dataTuple = _a.sent();
|
|
79
|
+
_a.label = 4;
|
|
76
80
|
case 4:
|
|
77
81
|
// TODO @s314cy: Support .csv labels for image datasets (supervised training or testing)
|
|
78
82
|
this.built = true;
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.DataLoader = exports.TabularLoader = exports.ImageLoader = exports.ImagePreprocessing = exports.ImageData = exports.TabularData = exports.Data = exports.DatasetBuilder = void 0;
|
|
4
|
+
var dataset_builder_1 = require("./dataset_builder");
|
|
5
|
+
Object.defineProperty(exports, "DatasetBuilder", { enumerable: true, get: function () { return dataset_builder_1.DatasetBuilder; } });
|
|
6
|
+
var data_1 = require("./data");
|
|
7
|
+
Object.defineProperty(exports, "Data", { enumerable: true, get: function () { return data_1.Data; } });
|
|
8
|
+
Object.defineProperty(exports, "TabularData", { enumerable: true, get: function () { return data_1.TabularData; } });
|
|
9
|
+
Object.defineProperty(exports, "ImageData", { enumerable: true, get: function () { return data_1.ImageData; } });
|
|
10
|
+
Object.defineProperty(exports, "ImagePreprocessing", { enumerable: true, get: function () { return data_1.ImagePreprocessing; } });
|
|
11
|
+
var data_loader_1 = require("./data_loader");
|
|
12
|
+
Object.defineProperty(exports, "ImageLoader", { enumerable: true, get: function () { return data_loader_1.ImageLoader; } });
|
|
13
|
+
Object.defineProperty(exports, "TabularLoader", { enumerable: true, get: function () { return data_loader_1.TabularLoader; } });
|
|
14
|
+
Object.defineProperty(exports, "DataLoader", { enumerable: true, get: function () { return data_loader_1.DataLoader; } });
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.cifar10 = void 0;
|
|
4
|
+
var tslib_1 = require("tslib");
|
|
5
|
+
var __1 = require("..");
|
|
6
|
+
exports.cifar10 = {
|
|
7
|
+
getTask: function () {
|
|
8
|
+
return {
|
|
9
|
+
taskID: 'cifar10',
|
|
10
|
+
displayInformation: {
|
|
11
|
+
taskTitle: 'CIFAR10',
|
|
12
|
+
summary: {
|
|
13
|
+
preview: 'In this challenge, we ask you to classify images into categories based on the objects shown on the image.',
|
|
14
|
+
overview: 'The CIFAR-10 dataset is a collection of images that are commonly used to train machine learning and computer vision algorithms. It is one of the most widely used datasets for machine learning research.'
|
|
15
|
+
},
|
|
16
|
+
limitations: 'The training data is limited to small images of size 32x32.',
|
|
17
|
+
tradeoffs: 'Training success strongly depends on label distribution',
|
|
18
|
+
dataFormatInformation: 'Images should be of .png format and of size 32x32. <br> The label file should be .csv, where each row contains a file_name, class. <br> <br> e.g. if you have images: 0.png (of a frog) and 1.png (of a car) <br> labels.csv contains: (Note that no header is needed)<br> 0.png, frog <br> 1.png, car',
|
|
19
|
+
dataExampleText: 'Below you can find 10 random examples from each of the 10 classes in the dataset.',
|
|
20
|
+
dataExampleImage: 'https://storage.googleapis.com/deai-313515.appspot.com/example_training_data/cifar10-example.png'
|
|
21
|
+
},
|
|
22
|
+
trainingInformation: {
|
|
23
|
+
modelID: 'cifar10-model',
|
|
24
|
+
epochs: 10,
|
|
25
|
+
roundDuration: 10,
|
|
26
|
+
validationSplit: 0.2,
|
|
27
|
+
batchSize: 10,
|
|
28
|
+
modelCompileData: {
|
|
29
|
+
optimizer: 'sgd',
|
|
30
|
+
loss: 'categoricalCrossentropy',
|
|
31
|
+
metrics: ['accuracy']
|
|
32
|
+
},
|
|
33
|
+
dataType: 'image',
|
|
34
|
+
IMAGE_H: 32,
|
|
35
|
+
IMAGE_W: 32,
|
|
36
|
+
preprocessingFunctions: [],
|
|
37
|
+
LABEL_LIST: ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],
|
|
38
|
+
scheme: 'Decentralized',
|
|
39
|
+
noiseScale: undefined,
|
|
40
|
+
clippingRadius: 20,
|
|
41
|
+
decentralizedSecure: true,
|
|
42
|
+
minimumReadyPeers: 3,
|
|
43
|
+
maxShareValue: 100
|
|
44
|
+
}
|
|
45
|
+
};
|
|
46
|
+
},
|
|
47
|
+
getModel: function () {
|
|
48
|
+
return (0, tslib_1.__awaiter)(this, void 0, void 0, function () {
|
|
49
|
+
var mobilenet, x, predictions;
|
|
50
|
+
return (0, tslib_1.__generator)(this, function (_a) {
|
|
51
|
+
switch (_a.label) {
|
|
52
|
+
case 0: return [4 /*yield*/, __1.tf.loadLayersModel('https://storage.googleapis.com/tfjs-models/tfjs/mobilenet_v1_0.25_224/model.json')];
|
|
53
|
+
case 1:
|
|
54
|
+
mobilenet = _a.sent();
|
|
55
|
+
x = mobilenet.getLayer('global_average_pooling2d_1');
|
|
56
|
+
predictions = __1.tf.layers
|
|
57
|
+
.dense({ units: 10, activation: 'softmax', name: 'denseModified' })
|
|
58
|
+
.apply(x.output);
|
|
59
|
+
return [2 /*return*/, __1.tf.model({
|
|
60
|
+
inputs: mobilenet.input,
|
|
61
|
+
outputs: predictions,
|
|
62
|
+
name: 'modelModified'
|
|
63
|
+
})];
|
|
64
|
+
}
|
|
65
|
+
});
|
|
66
|
+
});
|
|
67
|
+
}
|
|
68
|
+
};
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.geotags = void 0;
|
|
4
|
+
var tslib_1 = require("tslib");
|
|
5
|
+
var __1 = require("..");
|
|
6
|
+
var immutable_1 = require("immutable");
|
|
7
|
+
exports.geotags = {
|
|
8
|
+
getTask: function () {
|
|
9
|
+
return {
|
|
10
|
+
taskID: 'geotags',
|
|
11
|
+
displayInformation: {
|
|
12
|
+
taskTitle: 'GeoTags',
|
|
13
|
+
summary: {
|
|
14
|
+
preview: 'In this challenge, we predict the geo-location of a photo given its pixels in terms of a cell number of a grid built on top of Switzerland',
|
|
15
|
+
overview: 'The geotags dataset is a collection of images with geo-location information used to train a machine learning algorithm to predict the location of a photo given its pixels.'
|
|
16
|
+
},
|
|
17
|
+
limitations: 'The training data is limited to images of size 224x224.',
|
|
18
|
+
tradeoffs: 'Training success strongly depends on label distribution',
|
|
19
|
+
dataFormatInformation: 'Images should be of .png format and of size 224x224. <br> The label file should be .csv, where each row contains a file_name, class. The class is the cell number of a the given grid of Switzerland. '
|
|
20
|
+
},
|
|
21
|
+
trainingInformation: {
|
|
22
|
+
modelID: 'geotags-model',
|
|
23
|
+
epochs: 10,
|
|
24
|
+
roundDuration: 10,
|
|
25
|
+
validationSplit: 0.2,
|
|
26
|
+
batchSize: 10,
|
|
27
|
+
modelCompileData: {
|
|
28
|
+
optimizer: 'adam',
|
|
29
|
+
loss: 'categoricalCrossentropy',
|
|
30
|
+
metrics: ['accuracy']
|
|
31
|
+
},
|
|
32
|
+
dataType: 'image',
|
|
33
|
+
IMAGE_H: 224,
|
|
34
|
+
IMAGE_W: 224,
|
|
35
|
+
preprocessingFunctions: [__1.data.ImagePreprocessing.Resize],
|
|
36
|
+
LABEL_LIST: (0, immutable_1.Range)(0, 140).map(String).toArray(),
|
|
37
|
+
scheme: 'Federated',
|
|
38
|
+
noiseScale: undefined,
|
|
39
|
+
clippingRadius: 20,
|
|
40
|
+
decentralizedSecure: true,
|
|
41
|
+
minimumReadyPeers: 3,
|
|
42
|
+
maxShareValue: 100
|
|
43
|
+
}
|
|
44
|
+
};
|
|
45
|
+
},
|
|
46
|
+
getModel: function () {
|
|
47
|
+
return (0, tslib_1.__awaiter)(this, void 0, void 0, function () {
|
|
48
|
+
var pretrainedModel, numLayers, model;
|
|
49
|
+
return (0, tslib_1.__generator)(this, function (_a) {
|
|
50
|
+
switch (_a.label) {
|
|
51
|
+
case 0: return [4 /*yield*/, __1.tf.loadLayersModel('https://storage.googleapis.com/epfl-disco-models/geotags/v2/model.json')];
|
|
52
|
+
case 1:
|
|
53
|
+
pretrainedModel = _a.sent();
|
|
54
|
+
numLayers = pretrainedModel.layers.length;
|
|
55
|
+
pretrainedModel.layers.forEach(function (layer) { layer.trainable = false; });
|
|
56
|
+
pretrainedModel.layers[numLayers - 1].trainable = true;
|
|
57
|
+
model = __1.tf.sequential({
|
|
58
|
+
layers: [
|
|
59
|
+
__1.tf.layers.inputLayer({ inputShape: [224, 224, 3] }),
|
|
60
|
+
__1.tf.layers.rescaling({ scale: 1 / 127.5, offset: -1 }),
|
|
61
|
+
pretrainedModel
|
|
62
|
+
]
|
|
63
|
+
});
|
|
64
|
+
return [2 /*return*/, model];
|
|
65
|
+
}
|
|
66
|
+
});
|
|
67
|
+
});
|
|
68
|
+
}
|
|
69
|
+
};
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.geotags = exports.simpleFace = exports.titanic = exports.mnist = exports.lusCovid = exports.cifar10 = void 0;
|
|
4
|
+
var cifar10_1 = require("./cifar10");
|
|
5
|
+
Object.defineProperty(exports, "cifar10", { enumerable: true, get: function () { return cifar10_1.cifar10; } });
|
|
6
|
+
var lus_covid_1 = require("./lus_covid");
|
|
7
|
+
Object.defineProperty(exports, "lusCovid", { enumerable: true, get: function () { return lus_covid_1.lusCovid; } });
|
|
8
|
+
var mnist_1 = require("./mnist");
|
|
9
|
+
Object.defineProperty(exports, "mnist", { enumerable: true, get: function () { return mnist_1.mnist; } });
|
|
10
|
+
var titanic_1 = require("./titanic");
|
|
11
|
+
Object.defineProperty(exports, "titanic", { enumerable: true, get: function () { return titanic_1.titanic; } });
|
|
12
|
+
var simple_face_1 = require("./simple_face");
|
|
13
|
+
Object.defineProperty(exports, "simpleFace", { enumerable: true, get: function () { return simple_face_1.simpleFace; } });
|
|
14
|
+
var geotags_1 = require("./geotags");
|
|
15
|
+
Object.defineProperty(exports, "geotags", { enumerable: true, get: function () { return geotags_1.geotags; } });
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.lusCovid = void 0;
|
|
4
|
+
var tslib_1 = require("tslib");
|
|
5
|
+
var __1 = require("..");
|
|
6
|
+
exports.lusCovid = {
|
|
7
|
+
getTask: function () {
|
|
8
|
+
return {
|
|
9
|
+
taskID: 'lus_covid',
|
|
10
|
+
displayInformation: {
|
|
11
|
+
taskTitle: 'COVID Lung Ultrasound',
|
|
12
|
+
summary: {
|
|
13
|
+
preview: 'Do you have a data of lung ultrasound images on patients <b>suspected of Lower Respiratory Tract infection (LRTI) during the COVID pandemic</b>? <br> Learn how to discriminate between COVID positive and negative patients by joining this task.',
|
|
14
|
+
overview: "Don’t have a dataset of your own? Download a sample of a few cases <a class='underline' href='https://drive.switch.ch/index.php/s/zM5ZrUWK3taaIly' target='_blank'>here</a>."
|
|
15
|
+
},
|
|
16
|
+
model: "We use a simplified* version of the <b>DeepChest model</b>: A deep learning model developed in our lab (<a class='underline' href='https://www.epfl.ch/labs/mlo/igh-intelligent-global-health/'>intelligent Global Health</a>.). On a cohort of 400 Swiss patients suspected of LRTI, the model obtained over 90% area under the ROC curve for this task. <br><br>*Simplified to ensure smooth running on your browser, the performance is minimally affected. Details of the adaptations are below <br>- <b>Removed</b>: positional embedding (i.e. we don’t take the anatomic position into consideration). Rather, the model now does mean pooling over the feature vector of the images for each patient <br>- <b>Replaced</b>: ResNet18 by Mobilenet",
|
|
17
|
+
tradeoffs: 'We are using a simpler version of DeepChest in order to be able to run it on the browser.',
|
|
18
|
+
dataFormatInformation: 'This model takes as input an image dataset. It consists on a set of lung ultrasound images per patient with its corresponding label of covid positive or negative. Moreover, to identify the images per patient you have to follow the follwing naming pattern: "patientId_*.png"',
|
|
19
|
+
dataExampleText: 'Below you can find an example of an expected lung image for patient 2 named: 2_QAID_1.masked.reshaped.squared.224.png',
|
|
20
|
+
dataExampleImage: 'https://storage.googleapis.com/deai-313515.appspot.com/example_training_data/2_QAID_1.masked.reshaped.squared.224.png'
|
|
21
|
+
},
|
|
22
|
+
trainingInformation: {
|
|
23
|
+
modelID: 'lus-covid-model',
|
|
24
|
+
epochs: 15,
|
|
25
|
+
roundDuration: 10,
|
|
26
|
+
validationSplit: 0.2,
|
|
27
|
+
batchSize: 2,
|
|
28
|
+
modelCompileData: {
|
|
29
|
+
optimizer: 'sgd',
|
|
30
|
+
loss: 'binaryCrossentropy',
|
|
31
|
+
metrics: ['accuracy']
|
|
32
|
+
},
|
|
33
|
+
learningRate: 0.001,
|
|
34
|
+
IMAGE_H: 100,
|
|
35
|
+
IMAGE_W: 100,
|
|
36
|
+
preprocessingFunctions: [__1.data.ImagePreprocessing.Resize],
|
|
37
|
+
LABEL_LIST: ['COVID-Positive', 'COVID-Negative'],
|
|
38
|
+
dataType: 'image',
|
|
39
|
+
scheme: 'Decentralized',
|
|
40
|
+
noiseScale: undefined,
|
|
41
|
+
clippingRadius: 20,
|
|
42
|
+
decentralizedSecure: true,
|
|
43
|
+
minimumReadyPeers: 3,
|
|
44
|
+
maxShareValue: 100
|
|
45
|
+
}
|
|
46
|
+
};
|
|
47
|
+
},
|
|
48
|
+
getModel: function () {
|
|
49
|
+
return (0, tslib_1.__awaiter)(this, void 0, void 0, function () {
|
|
50
|
+
var imageHeight, imageWidth, imageChannels, numOutputClasses, model;
|
|
51
|
+
return (0, tslib_1.__generator)(this, function (_a) {
|
|
52
|
+
imageHeight = 100;
|
|
53
|
+
imageWidth = 100;
|
|
54
|
+
imageChannels = 3;
|
|
55
|
+
numOutputClasses = 2;
|
|
56
|
+
model = __1.tf.sequential();
|
|
57
|
+
// In the first layer of our convolutional neural network we have
|
|
58
|
+
// to specify the input shape. Then we specify some parameters for
|
|
59
|
+
// the convolution operation that takes place in this layer.
|
|
60
|
+
model.add(__1.tf.layers.conv2d({
|
|
61
|
+
inputShape: [imageHeight, imageWidth, imageChannels],
|
|
62
|
+
kernelSize: 5,
|
|
63
|
+
filters: 8,
|
|
64
|
+
strides: 1,
|
|
65
|
+
activation: 'relu',
|
|
66
|
+
kernelInitializer: 'varianceScaling'
|
|
67
|
+
}));
|
|
68
|
+
// The MaxPooling layer acts as a sort of downsampling using max values
|
|
69
|
+
// in a region instead of averaging.
|
|
70
|
+
model.add(__1.tf.layers.maxPooling2d({ poolSize: [2, 2], strides: [2, 2] }));
|
|
71
|
+
// Repeat another conv2d + maxPooling stack.
|
|
72
|
+
// Note that we have more filters in the convolution.
|
|
73
|
+
model.add(__1.tf.layers.conv2d({
|
|
74
|
+
kernelSize: 5,
|
|
75
|
+
filters: 16,
|
|
76
|
+
strides: 1,
|
|
77
|
+
activation: 'relu',
|
|
78
|
+
kernelInitializer: 'varianceScaling'
|
|
79
|
+
}));
|
|
80
|
+
model.add(__1.tf.layers.maxPooling2d({ poolSize: [2, 2], strides: [2, 2] }));
|
|
81
|
+
// Now we flatten the output from the 2D filters into a 1D vector to prepare
|
|
82
|
+
// it for input into our last layer. This is common practice when feeding
|
|
83
|
+
// higher dimensional data to a final classification output layer.
|
|
84
|
+
model.add(__1.tf.layers.flatten());
|
|
85
|
+
// Our last layer is a dense layer which has 2 output units, one for each
|
|
86
|
+
// output class.
|
|
87
|
+
model.add(__1.tf.layers.dense({
|
|
88
|
+
units: numOutputClasses,
|
|
89
|
+
kernelInitializer: 'varianceScaling',
|
|
90
|
+
activation: 'softmax'
|
|
91
|
+
}));
|
|
92
|
+
return [2 /*return*/, model];
|
|
93
|
+
});
|
|
94
|
+
});
|
|
95
|
+
}
|
|
96
|
+
};
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.mnist = void 0;
|
|
4
|
+
var tslib_1 = require("tslib");
|
|
5
|
+
var __1 = require("..");
|
|
6
|
+
exports.mnist = {
|
|
7
|
+
getTask: function () {
|
|
8
|
+
return {
|
|
9
|
+
taskID: 'mnist',
|
|
10
|
+
displayInformation: {
|
|
11
|
+
taskTitle: 'MNIST',
|
|
12
|
+
summary: {
|
|
13
|
+
preview: "Test our platform by using a publicly available <b>image</b> dataset. <br><br> Download the classic MNIST imagebank of hand-written numbers <a class='underline text-primary-dark dark:text-primary-light' href='https://www.kaggle.com/scolianni/mnistasjpg'>here</a>. <br> This model learns to identify hand written numbers.",
|
|
14
|
+
overview: 'The MNIST handwritten digit classification problem is a standard dataset used in computer vision and deep learning. Although the dataset is effectively solved, we use it to test our Decentralised Learning algorithms and platform.'
|
|
15
|
+
},
|
|
16
|
+
model: 'The current model is a very simple CNN and its main goal is to test the app and the Decentralizsed Learning functionality.',
|
|
17
|
+
tradeoffs: 'We are using a simple model, first a 2d convolutional layer > max pooling > 2d convolutional layer > max pooling > convolutional layer > 2 dense layers.',
|
|
18
|
+
dataFormatInformation: 'This model is trained on images corresponding to digits 0 to 9. You can upload each digit image of your dataset in the box corresponding to its label. The model taskes images of size 28x28 as input.',
|
|
19
|
+
dataExampleText: 'Below you can find an example of an expected image representing the digit 9.',
|
|
20
|
+
dataExampleImage: 'http://storage.googleapis.com/deai-313515.appspot.com/example_training_data/9-mnist-example.png'
|
|
21
|
+
},
|
|
22
|
+
trainingInformation: {
|
|
23
|
+
modelID: 'mnist-model',
|
|
24
|
+
epochs: 10,
|
|
25
|
+
roundDuration: 10,
|
|
26
|
+
validationSplit: 0.2,
|
|
27
|
+
batchSize: 30,
|
|
28
|
+
modelCompileData: {
|
|
29
|
+
optimizer: 'rmsprop',
|
|
30
|
+
loss: 'categoricalCrossentropy',
|
|
31
|
+
metrics: ['accuracy']
|
|
32
|
+
},
|
|
33
|
+
dataType: 'image',
|
|
34
|
+
IMAGE_H: 28,
|
|
35
|
+
IMAGE_W: 28,
|
|
36
|
+
preprocessingFunctions: [],
|
|
37
|
+
LABEL_LIST: ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'],
|
|
38
|
+
scheme: 'Decentralized',
|
|
39
|
+
noiseScale: undefined,
|
|
40
|
+
clippingRadius: 20,
|
|
41
|
+
decentralizedSecure: true,
|
|
42
|
+
minimumReadyPeers: 3,
|
|
43
|
+
maxShareValue: 100
|
|
44
|
+
}
|
|
45
|
+
};
|
|
46
|
+
},
|
|
47
|
+
getModel: function () {
|
|
48
|
+
return (0, tslib_1.__awaiter)(this, void 0, void 0, function () {
|
|
49
|
+
var model;
|
|
50
|
+
return (0, tslib_1.__generator)(this, function (_a) {
|
|
51
|
+
model = __1.tf.sequential();
|
|
52
|
+
model.add(__1.tf.layers.conv2d({
|
|
53
|
+
inputShape: [28, 28, 3],
|
|
54
|
+
kernelSize: 3,
|
|
55
|
+
filters: 16,
|
|
56
|
+
activation: 'relu'
|
|
57
|
+
}));
|
|
58
|
+
model.add(__1.tf.layers.maxPooling2d({ poolSize: 2, strides: 2 }));
|
|
59
|
+
model.add(__1.tf.layers.conv2d({ kernelSize: 3, filters: 32, activation: 'relu' }));
|
|
60
|
+
model.add(__1.tf.layers.maxPooling2d({ poolSize: 2, strides: 2 }));
|
|
61
|
+
model.add(__1.tf.layers.conv2d({ kernelSize: 3, filters: 32, activation: 'relu' }));
|
|
62
|
+
model.add(__1.tf.layers.flatten({}));
|
|
63
|
+
model.add(__1.tf.layers.dense({ units: 64, activation: 'relu' }));
|
|
64
|
+
model.add(__1.tf.layers.dense({ units: 10, activation: 'softmax' }));
|
|
65
|
+
return [2 /*return*/, model];
|
|
66
|
+
});
|
|
67
|
+
});
|
|
68
|
+
}
|
|
69
|
+
};
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.simpleFace = void 0;
|
|
4
|
+
var tslib_1 = require("tslib");
|
|
5
|
+
var __1 = require("..");
|
|
6
|
+
exports.simpleFace = {
|
|
7
|
+
getTask: function () {
|
|
8
|
+
return {
|
|
9
|
+
taskID: 'simple_face',
|
|
10
|
+
displayInformation: {
|
|
11
|
+
taskTitle: 'Simple Face',
|
|
12
|
+
summary: {
|
|
13
|
+
preview: 'Can you detect if the person in a picture is a child or an adult?',
|
|
14
|
+
overview: 'Simple face is a small subset of face_task from Kaggle'
|
|
15
|
+
},
|
|
16
|
+
limitations: 'The training data is limited to small images of size 200x200.',
|
|
17
|
+
tradeoffs: 'Training success strongly depends on label distribution',
|
|
18
|
+
dataFormatInformation: '',
|
|
19
|
+
dataExampleText: 'Below you find an example',
|
|
20
|
+
dataExampleImage: 'https://storage.googleapis.com/deai-313515.appspot.com/example_training_data/simple_face-example.png'
|
|
21
|
+
},
|
|
22
|
+
trainingInformation: {
|
|
23
|
+
modelID: 'simple_face-model',
|
|
24
|
+
epochs: 50,
|
|
25
|
+
modelURL: 'https://storage.googleapis.com/deai-313515.appspot.com/models/mobileNetV2_35_alpha_2_classes/model.json',
|
|
26
|
+
roundDuration: 1,
|
|
27
|
+
validationSplit: 0.2,
|
|
28
|
+
batchSize: 10,
|
|
29
|
+
preprocessingFunctions: [__1.data.ImagePreprocessing.Normalize],
|
|
30
|
+
learningRate: 0.001,
|
|
31
|
+
modelCompileData: {
|
|
32
|
+
optimizer: 'sgd',
|
|
33
|
+
loss: 'categoricalCrossentropy',
|
|
34
|
+
metrics: ['accuracy']
|
|
35
|
+
},
|
|
36
|
+
dataType: 'image',
|
|
37
|
+
IMAGE_H: 200,
|
|
38
|
+
IMAGE_W: 200,
|
|
39
|
+
LABEL_LIST: ['child', 'adult'],
|
|
40
|
+
scheme: 'Federated',
|
|
41
|
+
noiseScale: undefined,
|
|
42
|
+
clippingRadius: undefined
|
|
43
|
+
}
|
|
44
|
+
};
|
|
45
|
+
},
|
|
46
|
+
getModel: function () {
|
|
47
|
+
return (0, tslib_1.__awaiter)(this, void 0, void 0, function () {
|
|
48
|
+
return (0, tslib_1.__generator)(this, function (_a) {
|
|
49
|
+
throw new Error('Not implemented');
|
|
50
|
+
});
|
|
51
|
+
});
|
|
52
|
+
}
|
|
53
|
+
};
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.titanic = void 0;
|
|
4
|
+
var tslib_1 = require("tslib");
|
|
5
|
+
var __1 = require("..");
|
|
6
|
+
exports.titanic = {
|
|
7
|
+
getTask: function () {
|
|
8
|
+
return {
|
|
9
|
+
taskID: 'titanic',
|
|
10
|
+
displayInformation: {
|
|
11
|
+
taskTitle: 'Titanic',
|
|
12
|
+
summary: {
|
|
13
|
+
preview: "Test our platform by using a publicly available <b>tabular</b> dataset. <br><br> Download the passenger list from the Titanic shipwreck here: <a class='underline text-primary-dark dark:text-primary-light' href='https://github.com/epfml/disco/raw/develop/example_training_data/titanic_train.csv'>titanic_train.csv</a> (more info <a class='underline text-primary-dark dark:text-primary-light' href='https://www.kaggle.com/c/titanic'>here</a>). <br> This model predicts the type of person most likely to survive/die in the historic ship accident, based on their characteristics (sex, age, class etc.).",
|
|
14
|
+
overview: 'We all know the unfortunate story of the Titanic: this flamboyant new transatlantic boat that sunk in 1912 in the North Atlantic Ocean. Today, we revist this tragedy by trying to predict the survival odds of the passenger given some basic features.'
|
|
15
|
+
},
|
|
16
|
+
model: 'The current model does not normalize the given data and applies only a very simple pre-processing of the data.',
|
|
17
|
+
tradeoffs: 'We are using a small model for this task: 4 fully connected layers with few neurons. This allows fast training but can yield to reduced accuracy.',
|
|
18
|
+
dataFormatInformation: 'This model takes as input a CSV file with 12 columns. The features are general information about the passenger (sex, age, name, etc.) and specific related Titanic data such as the ticket class bought by the passenger, its cabin number, etc.<br><br>pclass: A proxy for socio-economic status (SES)<br>1st = Upper<br>2nd = Middle<br>3rd = Lower<br><br>age: Age is fractional if less than 1. If the age is estimated, it is in the form of xx.5<br><br>sibsp: The dataset defines family relations in this way:<br>Sibling = brother, sister, stepbrother, stepsister<br>Spouse = husband, wife (mistresses and fiancés were ignored)<br><br>parch: The dataset defines family relations in this way:<br>Parent = mother, father<br>Child = daughter, son, stepdaughter, stepson<br>Some children travelled only with a nanny, therefore parch=0 for them.<br><br>The first line of the CSV contains the header:<br> PassengerId, Survived, Pclass, Name, Sex, Age, SibSp, Parch, Ticket, Fare, Cabin, Embarked<br><br>Each susequent row contains the corresponding data.',
|
|
19
|
+
dataExampleText: 'Below one can find an example of a datapoint taken as input by our model. In this datapoint, the person is young man named Owen Harris that unfortunnalty perished with the Titanic. He boarded the boat in South Hamptons and was a 3rd class passenger. On the testing & validation page, the data should not contain the label column (Survived).',
|
|
20
|
+
dataExample: [
|
|
21
|
+
{ columnName: 'PassengerId', columnData: '1' },
|
|
22
|
+
{ columnName: 'Survived', columnData: '0' },
|
|
23
|
+
{ columnName: 'Name', columnData: 'Braund, Mr. Owen Harris' },
|
|
24
|
+
{ columnName: 'Sex', columnData: 'male' },
|
|
25
|
+
{ columnName: 'Age', columnData: '22' },
|
|
26
|
+
{ columnName: 'SibSp', columnData: '1' },
|
|
27
|
+
{ columnName: 'Parch', columnData: '0' },
|
|
28
|
+
{ columnName: 'Ticket', columnData: '1/5 21171' },
|
|
29
|
+
{ columnName: 'Fare', columnData: '7.25' },
|
|
30
|
+
{ columnName: 'Cabin', columnData: 'E46' },
|
|
31
|
+
{ columnName: 'Embarked', columnData: 'S' },
|
|
32
|
+
{ columnName: 'Pclass', columnData: '3' }
|
|
33
|
+
],
|
|
34
|
+
headers: [
|
|
35
|
+
'PassengerId',
|
|
36
|
+
'Survived',
|
|
37
|
+
'Name',
|
|
38
|
+
'Sex',
|
|
39
|
+
'Age',
|
|
40
|
+
'SibSp',
|
|
41
|
+
'Parch',
|
|
42
|
+
'Ticket',
|
|
43
|
+
'Fare',
|
|
44
|
+
'Cabin',
|
|
45
|
+
'Embarked',
|
|
46
|
+
'Pclass'
|
|
47
|
+
]
|
|
48
|
+
},
|
|
49
|
+
trainingInformation: {
|
|
50
|
+
modelID: 'titanic-model',
|
|
51
|
+
epochs: 20,
|
|
52
|
+
roundDuration: 10,
|
|
53
|
+
validationSplit: 0,
|
|
54
|
+
batchSize: 30,
|
|
55
|
+
preprocessingFunctions: [],
|
|
56
|
+
modelCompileData: {
|
|
57
|
+
optimizer: 'rmsprop',
|
|
58
|
+
loss: 'binaryCrossentropy',
|
|
59
|
+
metrics: ['accuracy']
|
|
60
|
+
},
|
|
61
|
+
dataType: 'tabular',
|
|
62
|
+
inputColumns: [
|
|
63
|
+
'PassengerId',
|
|
64
|
+
'Age',
|
|
65
|
+
'SibSp',
|
|
66
|
+
'Parch',
|
|
67
|
+
'Fare',
|
|
68
|
+
'Pclass'
|
|
69
|
+
],
|
|
70
|
+
outputColumns: [
|
|
71
|
+
'Survived'
|
|
72
|
+
],
|
|
73
|
+
scheme: 'Federated',
|
|
74
|
+
noiseScale: undefined,
|
|
75
|
+
clippingRadius: undefined
|
|
76
|
+
}
|
|
77
|
+
};
|
|
78
|
+
},
|
|
79
|
+
getModel: function () {
|
|
80
|
+
return (0, tslib_1.__awaiter)(this, void 0, void 0, function () {
|
|
81
|
+
var model;
|
|
82
|
+
return (0, tslib_1.__generator)(this, function (_a) {
|
|
83
|
+
model = __1.tf.sequential();
|
|
84
|
+
model.add(__1.tf.layers.dense({
|
|
85
|
+
inputShape: [6],
|
|
86
|
+
units: 124,
|
|
87
|
+
activation: 'relu',
|
|
88
|
+
kernelInitializer: 'leCunNormal'
|
|
89
|
+
}));
|
|
90
|
+
model.add(__1.tf.layers.dense({ units: 64, activation: 'relu' }));
|
|
91
|
+
model.add(__1.tf.layers.dense({ units: 32, activation: 'relu' }));
|
|
92
|
+
model.add(__1.tf.layers.dense({ units: 1, activation: 'sigmoid' }));
|
|
93
|
+
return [2 /*return*/, model];
|
|
94
|
+
});
|
|
95
|
+
});
|
|
96
|
+
}
|
|
97
|
+
};
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
// Public entry point of the discojs core: re-exports the library's API surface.
// Namespaced sub-modules first, then individual named exports.
export * as tf from '@tensorflow/tfjs';
export * as data from './dataset';
export * as serialization from './serialization';
export * as training from './training';
export * as privacy from './privacy';
// Training-progress reporting helpers.
export { GraphInformant, TrainingInformant, informant } from './informant';
// The base client class is re-exported under the public name `Client`.
export { Base as Client } from './client';
export * as client from './client';
// Model-weight containers and aggregation utilities.
export { WeightsContainer, aggregation } from './weights';
export { AsyncBuffer } from './async_buffer';
export { AsyncInformant } from './async_informant';
export { Logger, ConsoleLogger, TrainerLog } from './logging';
// Model persistence: `Empty` is the no-op memory, exported as `EmptyMemory`.
export { Memory, ModelType, ModelInfo, Path, ModelSource, Empty as EmptyMemory } from './memory';
export { Disco, TrainingSchemes } from './training';
export { Validator } from './validation';
export * from './task';
// Built-in example tasks (mnist, titanic, simple_face, ...).
export * as defaultTasks from './default_tasks';
export * from './types';