flwr-1.24.0-py3-none-any.whl → flwr-1.25.0-py3-none-any.whl

This diff shows the changes between package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (96)
  1. flwr/cli/app_cmd/review.py +13 -3
  2. flwr/cli/federation/show.py +4 -3
  3. flwr/cli/ls.py +44 -3
  4. flwr/cli/new/new.py +106 -297
  5. flwr/cli/run/run.py +12 -17
  6. flwr/cli/run_utils.py +23 -5
  7. flwr/cli/stop.py +1 -1
  8. flwr/cli/supernode/ls.py +10 -5
  9. flwr/cli/utils.py +0 -137
  10. flwr/client/grpc_adapter_client/connection.py +2 -2
  11. flwr/client/grpc_rere_client/connection.py +6 -3
  12. flwr/client/rest_client/connection.py +6 -4
  13. flwr/common/serde.py +6 -0
  14. flwr/common/typing.py +6 -0
  15. flwr/proto/fleet_pb2.py +10 -10
  16. flwr/proto/fleet_pb2.pyi +5 -1
  17. flwr/proto/run_pb2.py +24 -24
  18. flwr/proto/run_pb2.pyi +10 -1
  19. flwr/server/app.py +1 -0
  20. flwr/server/superlink/fleet/message_handler/message_handler.py +41 -2
  21. flwr/server/superlink/linkstate/in_memory_linkstate.py +34 -0
  22. flwr/server/superlink/linkstate/linkstate.py +32 -0
  23. flwr/server/superlink/linkstate/sqlite_linkstate.py +60 -3
  24. flwr/supercore/constant.py +3 -0
  25. flwr/supercore/utils.py +190 -0
  26. flwr/superlink/servicer/control/control_grpc.py +2 -0
  27. flwr/superlink/servicer/control/control_servicer.py +88 -5
  28. flwr/supernode/nodestate/in_memory_nodestate.py +62 -1
  29. flwr/supernode/nodestate/nodestate.py +45 -0
  30. flwr/supernode/servicer/clientappio/clientappio_servicer.py +7 -1
  31. flwr/supernode/start_client_internal.py +7 -4
  32. {flwr-1.24.0.dist-info → flwr-1.25.0.dist-info}/METADATA +2 -4
  33. {flwr-1.24.0.dist-info → flwr-1.25.0.dist-info}/RECORD +35 -96
  34. flwr/cli/new/templates/__init__.py +0 -15
  35. flwr/cli/new/templates/app/.gitignore.tpl +0 -163
  36. flwr/cli/new/templates/app/LICENSE.tpl +0 -202
  37. flwr/cli/new/templates/app/README.baseline.md.tpl +0 -127
  38. flwr/cli/new/templates/app/README.flowertune.md.tpl +0 -68
  39. flwr/cli/new/templates/app/README.md.tpl +0 -37
  40. flwr/cli/new/templates/app/__init__.py +0 -15
  41. flwr/cli/new/templates/app/code/__init__.baseline.py.tpl +0 -1
  42. flwr/cli/new/templates/app/code/__init__.py +0 -15
  43. flwr/cli/new/templates/app/code/__init__.py.tpl +0 -1
  44. flwr/cli/new/templates/app/code/__init__.pytorch_legacy_api.py.tpl +0 -1
  45. flwr/cli/new/templates/app/code/client.baseline.py.tpl +0 -75
  46. flwr/cli/new/templates/app/code/client.huggingface.py.tpl +0 -93
  47. flwr/cli/new/templates/app/code/client.jax.py.tpl +0 -71
  48. flwr/cli/new/templates/app/code/client.mlx.py.tpl +0 -102
  49. flwr/cli/new/templates/app/code/client.numpy.py.tpl +0 -46
  50. flwr/cli/new/templates/app/code/client.pytorch.py.tpl +0 -80
  51. flwr/cli/new/templates/app/code/client.pytorch_legacy_api.py.tpl +0 -55
  52. flwr/cli/new/templates/app/code/client.sklearn.py.tpl +0 -108
  53. flwr/cli/new/templates/app/code/client.tensorflow.py.tpl +0 -82
  54. flwr/cli/new/templates/app/code/client.xgboost.py.tpl +0 -110
  55. flwr/cli/new/templates/app/code/dataset.baseline.py.tpl +0 -36
  56. flwr/cli/new/templates/app/code/flwr_tune/__init__.py +0 -15
  57. flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl +0 -92
  58. flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl +0 -87
  59. flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl +0 -56
  60. flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl +0 -73
  61. flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl +0 -78
  62. flwr/cli/new/templates/app/code/model.baseline.py.tpl +0 -66
  63. flwr/cli/new/templates/app/code/server.baseline.py.tpl +0 -43
  64. flwr/cli/new/templates/app/code/server.huggingface.py.tpl +0 -42
  65. flwr/cli/new/templates/app/code/server.jax.py.tpl +0 -39
  66. flwr/cli/new/templates/app/code/server.mlx.py.tpl +0 -41
  67. flwr/cli/new/templates/app/code/server.numpy.py.tpl +0 -38
  68. flwr/cli/new/templates/app/code/server.pytorch.py.tpl +0 -41
  69. flwr/cli/new/templates/app/code/server.pytorch_legacy_api.py.tpl +0 -31
  70. flwr/cli/new/templates/app/code/server.sklearn.py.tpl +0 -44
  71. flwr/cli/new/templates/app/code/server.tensorflow.py.tpl +0 -38
  72. flwr/cli/new/templates/app/code/server.xgboost.py.tpl +0 -56
  73. flwr/cli/new/templates/app/code/strategy.baseline.py.tpl +0 -1
  74. flwr/cli/new/templates/app/code/task.huggingface.py.tpl +0 -98
  75. flwr/cli/new/templates/app/code/task.jax.py.tpl +0 -57
  76. flwr/cli/new/templates/app/code/task.mlx.py.tpl +0 -102
  77. flwr/cli/new/templates/app/code/task.numpy.py.tpl +0 -7
  78. flwr/cli/new/templates/app/code/task.pytorch.py.tpl +0 -99
  79. flwr/cli/new/templates/app/code/task.pytorch_legacy_api.py.tpl +0 -111
  80. flwr/cli/new/templates/app/code/task.sklearn.py.tpl +0 -67
  81. flwr/cli/new/templates/app/code/task.tensorflow.py.tpl +0 -52
  82. flwr/cli/new/templates/app/code/task.xgboost.py.tpl +0 -67
  83. flwr/cli/new/templates/app/code/utils.baseline.py.tpl +0 -1
  84. flwr/cli/new/templates/app/pyproject.baseline.toml.tpl +0 -146
  85. flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl +0 -80
  86. flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl +0 -65
  87. flwr/cli/new/templates/app/pyproject.jax.toml.tpl +0 -52
  88. flwr/cli/new/templates/app/pyproject.mlx.toml.tpl +0 -56
  89. flwr/cli/new/templates/app/pyproject.numpy.toml.tpl +0 -49
  90. flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +0 -53
  91. flwr/cli/new/templates/app/pyproject.pytorch_legacy_api.toml.tpl +0 -53
  92. flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl +0 -52
  93. flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl +0 -53
  94. flwr/cli/new/templates/app/pyproject.xgboost.toml.tpl +0 -61
  95. {flwr-1.24.0.dist-info → flwr-1.25.0.dist-info}/WHEEL +0 -0
  96. {flwr-1.24.0.dist-info → flwr-1.25.0.dist-info}/entry_points.txt +0 -0
flwr/cli/new/templates/app/code/task.mlx.py.tpl
@@ -1,102 +0,0 @@
- """$project_name: A Flower / $framework_str app."""
-
- import mlx.core as mx
- import mlx.nn as nn
- import numpy as np
- from flwr_datasets import FederatedDataset
- from flwr_datasets.partitioner import IidPartitioner
-
- from datasets.utils.logging import disable_progress_bar
-
- disable_progress_bar()
-
-
- class MLP(nn.Module):
-     """A simple MLP."""
-
-     def __init__(
-         self, num_layers: int, input_dim: int, hidden_dim: int, output_dim: int
-     ):
-         super().__init__()
-         layer_sizes = [input_dim] + [hidden_dim] * num_layers + [output_dim]
-         self.layers = [
-             nn.Linear(idim, odim)
-             for idim, odim in zip(layer_sizes[:-1], layer_sizes[1:])
-         ]
-
-     def __call__(self, x):
-         for l in self.layers[:-1]:
-             x = mx.maximum(l(x), 0.0)
-         return self.layers[-1](x)
-
-
- def loss_fn(model, X, y):
-     return mx.mean(nn.losses.cross_entropy(model(X), y))
-
-
- def eval_fn(model, X, y):
-     return mx.mean(mx.argmax(model(X), axis=1) == y)
-
-
- def batch_iterate(batch_size, X, y):
-     perm = mx.array(np.random.permutation(y.size))
-     for s in range(0, y.size, batch_size):
-         ids = perm[s : s + batch_size]
-         yield X[ids], y[ids]
-
-
- fds = None  # Cache FederatedDataset
-
-
- def load_data(partition_id: int, num_partitions: int):
-     # Only initialize `FederatedDataset` once
-     global fds
-     if fds is None:
-         partitioner = IidPartitioner(num_partitions=num_partitions)
-         fds = FederatedDataset(
-             dataset="ylecun/mnist",
-             partitioners={"train": partitioner},
-             trust_remote_code=True,
-         )
-     partition = fds.load_partition(partition_id)
-     partition_splits = partition.train_test_split(test_size=0.2, seed=42)
-
-     partition_splits["train"].set_format("numpy")
-     partition_splits["test"].set_format("numpy")
-
-     train_partition = partition_splits["train"].map(
-         lambda img: {
-             "img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0
-         },
-         input_columns="image",
-     )
-     test_partition = partition_splits["test"].map(
-         lambda img: {
-             "img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0
-         },
-         input_columns="image",
-     )
-
-     data = (
-         train_partition["img"],
-         train_partition["label"].astype(np.uint32),
-         test_partition["img"],
-         test_partition["label"].astype(np.uint32),
-     )
-
-     train_images, train_labels, test_images, test_labels = map(mx.array, data)
-     return train_images, train_labels, test_images, test_labels
-
-
- def get_params(model):
-     layers = model.parameters()["layers"]
-     return [np.array(val) for layer in layers for _, val in layer.items()]
-
-
- def set_params(model, parameters):
-     new_params = {}
-     new_params["layers"] = [
-         {"weight": mx.array(parameters[i]), "bias": mx.array(parameters[i + 1])}
-         for i in range(0, len(parameters), 2)
-     ]
-     model.update(new_params)
flwr/cli/new/templates/app/code/task.numpy.py.tpl
@@ -1,7 +0,0 @@
- """$project_name: A Flower / $framework_str app."""
-
- import numpy as np
-
-
- def get_dummy_model():
-     return [np.ones((1, 1))]
flwr/cli/new/templates/app/code/task.pytorch.py.tpl
@@ -1,99 +0,0 @@
- """$project_name: A Flower / $framework_str app."""
-
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from flwr_datasets import FederatedDataset
- from flwr_datasets.partitioner import IidPartitioner
- from torch.utils.data import DataLoader
- from torchvision.transforms import Compose, Normalize, ToTensor
-
-
- class Net(nn.Module):
-     """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')"""
-
-     def __init__(self):
-         super(Net, self).__init__()
-         self.conv1 = nn.Conv2d(3, 6, 5)
-         self.pool = nn.MaxPool2d(2, 2)
-         self.conv2 = nn.Conv2d(6, 16, 5)
-         self.fc1 = nn.Linear(16 * 5 * 5, 120)
-         self.fc2 = nn.Linear(120, 84)
-         self.fc3 = nn.Linear(84, 10)
-
-     def forward(self, x):
-         x = self.pool(F.relu(self.conv1(x)))
-         x = self.pool(F.relu(self.conv2(x)))
-         x = x.view(-1, 16 * 5 * 5)
-         x = F.relu(self.fc1(x))
-         x = F.relu(self.fc2(x))
-         return self.fc3(x)
-
-
- fds = None  # Cache FederatedDataset
-
- pytorch_transforms = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
-
-
- def apply_transforms(batch):
-     """Apply transforms to the partition from FederatedDataset."""
-     batch["img"] = [pytorch_transforms(img) for img in batch["img"]]
-     return batch
-
-
- def load_data(partition_id: int, num_partitions: int):
-     """Load partition CIFAR10 data."""
-     # Only initialize `FederatedDataset` once
-     global fds
-     if fds is None:
-         partitioner = IidPartitioner(num_partitions=num_partitions)
-         fds = FederatedDataset(
-             dataset="uoft-cs/cifar10",
-             partitioners={"train": partitioner},
-         )
-     partition = fds.load_partition(partition_id)
-     # Divide data on each node: 80% train, 20% test
-     partition_train_test = partition.train_test_split(test_size=0.2, seed=42)
-     # Construct dataloaders
-     partition_train_test = partition_train_test.with_transform(apply_transforms)
-     trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True)
-     testloader = DataLoader(partition_train_test["test"], batch_size=32)
-     return trainloader, testloader
-
-
- def train(net, trainloader, epochs, lr, device):
-     """Train the model on the training set."""
-     net.to(device)  # move model to GPU if available
-     criterion = torch.nn.CrossEntropyLoss().to(device)
-     optimizer = torch.optim.Adam(net.parameters(), lr=lr)
-     net.train()
-     running_loss = 0.0
-     for _ in range(epochs):
-         for batch in trainloader:
-             images = batch["img"].to(device)
-             labels = batch["label"].to(device)
-             optimizer.zero_grad()
-             loss = criterion(net(images), labels)
-             loss.backward()
-             optimizer.step()
-             running_loss += loss.item()
-     avg_trainloss = running_loss / len(trainloader)
-     return avg_trainloss
-
-
- def test(net, testloader, device):
-     """Validate the model on the test set."""
-     net.to(device)
-     net.eval()
-     criterion = torch.nn.CrossEntropyLoss()
-     correct, loss = 0, 0.0
-     with torch.no_grad():
-         for batch in testloader:
-             images = batch["img"].to(device)
-             labels = batch["label"].to(device)
-             outputs = net(images)
-             loss += criterion(outputs, labels).item()
-             correct += (torch.max(outputs.data, 1)[1] == labels).sum().item()
-     accuracy = correct / len(testloader.dataset)
-     loss = loss / len(testloader)
-     return loss, accuracy
flwr/cli/new/templates/app/code/task.pytorch_legacy_api.py.tpl
@@ -1,111 +0,0 @@
- """$project_name: A Flower / $framework_str app."""
-
- from collections import OrderedDict
-
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from flwr_datasets import FederatedDataset
- from flwr_datasets.partitioner import IidPartitioner
- from torch.utils.data import DataLoader
- from torchvision.transforms import Compose, Normalize, ToTensor
-
-
- class Net(nn.Module):
-     """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')"""
-
-     def __init__(self):
-         super(Net, self).__init__()
-         self.conv1 = nn.Conv2d(3, 6, 5)
-         self.pool = nn.MaxPool2d(2, 2)
-         self.conv2 = nn.Conv2d(6, 16, 5)
-         self.fc1 = nn.Linear(16 * 5 * 5, 120)
-         self.fc2 = nn.Linear(120, 84)
-         self.fc3 = nn.Linear(84, 10)
-
-     def forward(self, x):
-         x = self.pool(F.relu(self.conv1(x)))
-         x = self.pool(F.relu(self.conv2(x)))
-         x = x.view(-1, 16 * 5 * 5)
-         x = F.relu(self.fc1(x))
-         x = F.relu(self.fc2(x))
-         return self.fc3(x)
-
-
- fds = None  # Cache FederatedDataset
-
-
- def load_data(partition_id: int, num_partitions: int):
-     """Load partition CIFAR10 data."""
-     # Only initialize `FederatedDataset` once
-     global fds
-     if fds is None:
-         partitioner = IidPartitioner(num_partitions=num_partitions)
-         fds = FederatedDataset(
-             dataset="uoft-cs/cifar10",
-             partitioners={"train": partitioner},
-         )
-     partition = fds.load_partition(partition_id)
-     # Divide data on each node: 80% train, 20% test
-     partition_train_test = partition.train_test_split(test_size=0.2, seed=42)
-     pytorch_transforms = Compose(
-         [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
-     )
-
-     def apply_transforms(batch):
-         """Apply transforms to the partition from FederatedDataset."""
-         batch["img"] = [pytorch_transforms(img) for img in batch["img"]]
-         return batch
-
-     partition_train_test = partition_train_test.with_transform(apply_transforms)
-     trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True)
-     testloader = DataLoader(partition_train_test["test"], batch_size=32)
-     return trainloader, testloader
-
-
- def train(net, trainloader, epochs, device):
-     """Train the model on the training set."""
-     net.to(device)  # move model to GPU if available
-     criterion = torch.nn.CrossEntropyLoss().to(device)
-     optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
-     net.train()
-     running_loss = 0.0
-     for _ in range(epochs):
-         for batch in trainloader:
-             images = batch["img"]
-             labels = batch["label"]
-             optimizer.zero_grad()
-             loss = criterion(net(images.to(device)), labels.to(device))
-             loss.backward()
-             optimizer.step()
-             running_loss += loss.item()
-
-     avg_trainloss = running_loss / len(trainloader)
-     return avg_trainloss
-
-
- def test(net, testloader, device):
-     """Validate the model on the test set."""
-     net.to(device)
-     criterion = torch.nn.CrossEntropyLoss()
-     correct, loss = 0, 0.0
-     with torch.no_grad():
-         for batch in testloader:
-             images = batch["img"].to(device)
-             labels = batch["label"].to(device)
-             outputs = net(images)
-             loss += criterion(outputs, labels).item()
-             correct += (torch.max(outputs.data, 1)[1] == labels).sum().item()
-     accuracy = correct / len(testloader.dataset)
-     loss = loss / len(testloader)
-     return loss, accuracy
-
-
- def get_weights(net):
-     return [val.cpu().numpy() for _, val in net.state_dict().items()]
-
-
- def set_weights(net, parameters):
-     params_dict = zip(net.state_dict().keys(), parameters)
-     state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
-     net.load_state_dict(state_dict, strict=True)
flwr/cli/new/templates/app/code/task.sklearn.py.tpl
@@ -1,67 +0,0 @@
- """$project_name: A Flower / $framework_str app."""
-
- import numpy as np
- from flwr_datasets import FederatedDataset
- from flwr_datasets.partitioner import IidPartitioner
- from sklearn.linear_model import LogisticRegression
-
- fds = None  # Cache FederatedDataset
-
-
- def load_data(partition_id: int, num_partitions: int):
-     """Load partition MNIST data."""
-     # Only initialize `FederatedDataset` once
-     global fds
-     if fds is None:
-         partitioner = IidPartitioner(num_partitions=num_partitions)
-         fds = FederatedDataset(
-             dataset="ylecun/mnist",
-             partitioners={"train": partitioner},
-         )
-
-     dataset = fds.load_partition(partition_id, "train").with_format("numpy")
-
-     X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"]
-
-     # Split the on edge data: 80% train, 20% test
-     X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :]
-     y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :]
-
-     return X_train, X_test, y_train, y_test
-
-
- def get_model(penalty: str, local_epochs: int):
-
-     return LogisticRegression(
-         penalty=penalty,
-         max_iter=local_epochs,
-         warm_start=True,
-     )
-
-
- def get_model_params(model):
-     if model.fit_intercept:
-         params = [
-             model.coef_,
-             model.intercept_,
-         ]
-     else:
-         params = [model.coef_]
-     return params
-
-
- def set_model_params(model, params):
-     model.coef_ = params[0]
-     if model.fit_intercept:
-         model.intercept_ = params[1]
-     return model
-
-
- def set_initial_params(model):
-     n_classes = 10  # MNIST has 10 classes
-     n_features = 784  # Number of features in dataset
-     model.classes_ = np.array([i for i in range(10)])
-
-     model.coef_ = np.zeros((n_classes, n_features))
-     if model.fit_intercept:
-         model.intercept_ = np.zeros((n_classes,))
flwr/cli/new/templates/app/code/task.tensorflow.py.tpl
@@ -1,52 +0,0 @@
- """$project_name: A Flower / $framework_str app."""
-
- import os
-
- import keras
- from flwr_datasets import FederatedDataset
- from flwr_datasets.partitioner import IidPartitioner
- from keras import layers
-
- # Make TensorFlow log less verbose
- os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
-
-
- def load_model():
-     # Define a simple CNN for CIFAR-10 and set Adam optimizer
-     model = keras.Sequential(
-         [
-             keras.Input(shape=(32, 32, 3)),
-             layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
-             layers.MaxPooling2D(pool_size=(2, 2)),
-             layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
-             layers.MaxPooling2D(pool_size=(2, 2)),
-             layers.Flatten(),
-             layers.Dropout(0.5),
-             layers.Dense(10, activation="softmax"),
-         ]
-     )
-     model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"])
-     return model
-
-
- fds = None  # Cache FederatedDataset
-
-
- def load_data(partition_id, num_partitions):
-     # Download and partition dataset
-     # Only initialize `FederatedDataset` once
-     global fds
-     if fds is None:
-         partitioner = IidPartitioner(num_partitions=num_partitions)
-         fds = FederatedDataset(
-             dataset="uoft-cs/cifar10",
-             partitioners={"train": partitioner},
-         )
-     partition = fds.load_partition(partition_id, "train")
-     partition.set_format("numpy")
-
-     # Divide data on each node: 80% train, 20% test
-     partition = partition.train_test_split(test_size=0.2)
-     x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"]
-     x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"]
-     return x_train, y_train, x_test, y_test
flwr/cli/new/templates/app/code/task.xgboost.py.tpl
@@ -1,67 +0,0 @@
- """$project_name: A Flower / $framework_str app."""
-
- import xgboost as xgb
- from flwr_datasets import FederatedDataset
- from flwr_datasets.partitioner import IidPartitioner
-
-
- def train_test_split(partition, test_fraction, seed):
-     """Split the data into train and validation set given split rate."""
-     train_test = partition.train_test_split(test_size=test_fraction, seed=seed)
-     partition_train = train_test["train"]
-     partition_test = train_test["test"]
-
-     num_train = len(partition_train)
-     num_test = len(partition_test)
-
-     return partition_train, partition_test, num_train, num_test
-
-
- def transform_dataset_to_dmatrix(data):
-     """Transform dataset to DMatrix format for xgboost."""
-     x = data["inputs"]
-     y = data["label"]
-     new_data = xgb.DMatrix(x, label=y)
-     return new_data
-
-
- fds = None  # Cache FederatedDataset
-
-
- def load_data(partition_id, num_clients):
-     """Load partition HIGGS data."""
-     # Only initialize `FederatedDataset` once
-     global fds
-     if fds is None:
-         partitioner = IidPartitioner(num_partitions=num_clients)
-         fds = FederatedDataset(
-             dataset="jxie/higgs",
-             partitioners={"train": partitioner},
-         )
-
-     # Load the partition for this `partition_id`
-     partition = fds.load_partition(partition_id, split="train")
-     partition.set_format("numpy")
-
-     # Train/test splitting
-     train_data, valid_data, num_train, num_val = train_test_split(
-         partition, test_fraction=0.2, seed=42
-     )
-
-     # Reformat data to DMatrix for xgboost
-     train_dmatrix = transform_dataset_to_dmatrix(train_data)
-     valid_dmatrix = transform_dataset_to_dmatrix(valid_data)
-
-     return train_dmatrix, valid_dmatrix, num_train, num_val
-
-
- def replace_keys(input_dict, match="-", target="_"):
-     """Recursively replace match string with target string in dictionary keys."""
-     new_dict = {}
-     for key, value in input_dict.items():
-         new_key = key.replace(match, target)
-         if isinstance(value, dict):
-             new_dict[new_key] = replace_keys(value, match, target)
-         else:
-             new_dict[new_key] = value
-     return new_dict
flwr/cli/new/templates/app/code/utils.baseline.py.tpl
@@ -1 +0,0 @@
- """$project_name: A Flower Baseline."""
flwr/cli/new/templates/app/pyproject.baseline.toml.tpl
@@ -1,146 +0,0 @@
- # =====================================================================
- # For a full TOML configuration guide, check the Flower docs:
- # https://flower.ai/docs/framework/how-to-configure-pyproject-toml.html
- # =====================================================================
-
- [build-system]
- requires = ["hatchling"]
- build-backend = "hatchling.build"
-
- [project]
- name = "$package_name"
- version = "1.0.0"
- description = ""
- license = "Apache-2.0"
- # Dependencies for your Flower App
- dependencies = [
-     "flwr[simulation]>=1.24.0",
-     "flwr-datasets[vision]>=0.5.0",
-     "torch==2.8.0",
-     "torchvision==0.23.0",
- ]
-
- [tool.hatch.metadata]
- allow-direct-references = true
-
- [project.optional-dependencies]
- dev = [
-     "isort==5.13.2",
-     "black==24.2.0",
-     "docformatter==1.7.5",
-     "mypy==1.8.0",
-     "pylint==3.3.1",
-     "pytest==7.4.4",
-     "pytest-watch==4.2.0",
-     "ruff==0.4.5",
-     "types-requests==2.31.0.20240125",
- ]
-
- [tool.isort]
- profile = "black"
-
- [tool.black]
- line-length = 88
- target-version = ["py310", "py311", "py312"]
-
- [tool.pytest.ini_options]
- minversion = "6.2"
- addopts = "-qq"
-
- [tool.mypy]
- ignore_missing_imports = true
- strict = false
- plugins = "numpy.typing.mypy_plugin"
-
- [tool.pylint."MESSAGES CONTROL"]
- disable = "duplicate-code,too-few-public-methods,useless-import-alias"
- good-names = "i,j,k,_,x,y,X,Y,K,N"
- max-args = 10
- max-attributes = 15
- max-locals = 36
- max-branches = 20
- max-statements = 55
-
- [tool.pylint.typecheck]
- generated-members = "numpy.*, torch.*, tensorflow.*"
-
- [[tool.mypy.overrides]]
- module = [
-     "importlib.metadata.*",
-     "importlib_metadata.*",
- ]
- follow_imports = "skip"
- follow_imports_for_stubs = true
- disallow_untyped_calls = false
-
- [[tool.mypy.overrides]]
- module = "torch.*"
- follow_imports = "skip"
- follow_imports_for_stubs = true
-
- [tool.docformatter]
- wrap-summaries = 88
- wrap-descriptions = 88
-
- [tool.ruff]
- target-version = "py310"
- line-length = 88
- exclude = [
-     ".bzr",
-     ".direnv",
-     ".eggs",
-     ".git",
-     ".hg",
-     ".mypy_cache",
-     ".nox",
-     ".pants.d",
-     ".pytype",
-     ".ruff_cache",
-     ".svn",
-     ".tox",
-     ".venv",
-     "__pypackages__",
-     "_build",
-     "buck-out",
-     "build",
-     "dist",
-     "node_modules",
-     "venv",
-     "proto",
- ]
-
- [tool.ruff.lint]
- select = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"]
- fixable = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"]
- ignore = ["B024", "B027", "D205", "D209"]
-
- [tool.ruff.lint.pydocstyle]
- convention = "numpy"
-
- [tool.hatch.build.targets.wheel]
- packages = ["."]
-
- [tool.flwr.app]
- publisher = "$username"
-
- # Point to your ServerApp and ClientApp objects
- # Format: "<module>:<object>"
- [tool.flwr.app.components]
- serverapp = "$import_name.server_app:app"
- clientapp = "$import_name.client_app:app"
-
- # Custom config values accessible via `context.run_config`
- [tool.flwr.app.config]
- num-server-rounds = 3
- fraction-train = 0.5
- local-epochs = 1
-
- # Default federation to use when running the app
- [tool.flwr.federations]
- default = "local-simulation"
-
- # Local simulation federation with 10 virtual SuperNodes
- [tool.flwr.federations.local-simulation]
- options.num-supernodes = 10
- options.backend.client-resources.num-cpus = 2
- options.backend.client-resources.num-gpus = 0.0