flwr-nightly 1.12.0.dev20241006__py3-none-any.whl → 1.12.0.dev20241008__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of flwr-nightly might be problematic.

Files changed (31)
  1. flwr/cli/new/new.py +2 -0
  2. flwr/cli/new/templates/app/code/client.jax.py.tpl +11 -17
  3. flwr/cli/new/templates/app/code/client.mlx.py.tpl +16 -36
  4. flwr/cli/new/templates/app/code/client.numpy.py.tpl +4 -5
  5. flwr/cli/new/templates/app/code/client.pytorch.py.tpl +8 -11
  6. flwr/cli/new/templates/app/code/client.sklearn.py.tpl +14 -48
  7. flwr/cli/new/templates/app/code/server.jax.py.tpl +9 -3
  8. flwr/cli/new/templates/app/code/server.mlx.py.tpl +13 -2
  9. flwr/cli/new/templates/app/code/server.numpy.py.tpl +7 -2
  10. flwr/cli/new/templates/app/code/server.pytorch.py.tpl +1 -1
  11. flwr/cli/new/templates/app/code/server.sklearn.py.tpl +13 -1
  12. flwr/cli/new/templates/app/code/task.jax.py.tpl +2 -2
  13. flwr/cli/new/templates/app/code/task.mlx.py.tpl +1 -1
  14. flwr/cli/new/templates/app/code/task.numpy.py.tpl +7 -0
  15. flwr/cli/new/templates/app/code/task.pytorch.py.tpl +3 -3
  16. flwr/cli/new/templates/app/code/task.sklearn.py.tpl +67 -0
  17. flwr/cli/new/templates/app/pyproject.jax.toml.tpl +3 -2
  18. flwr/cli/new/templates/app/pyproject.mlx.toml.tpl +1 -0
  19. flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl +2 -0
  20. flwr/common/constant.py +6 -0
  21. flwr/common/message.py +32 -15
  22. flwr/server/compat/driver_client_proxy.py +15 -29
  23. flwr/server/driver/inmemory_driver.py +5 -1
  24. flwr/server/strategy/aggregate.py +22 -8
  25. flwr/server/superlink/state/in_memory_state.py +30 -2
  26. flwr/server/superlink/state/sqlite_state.py +29 -2
  27. {flwr_nightly-1.12.0.dev20241006.dist-info → flwr_nightly-1.12.0.dev20241008.dist-info}/METADATA +1 -1
  28. {flwr_nightly-1.12.0.dev20241006.dist-info → flwr_nightly-1.12.0.dev20241008.dist-info}/RECORD +31 -29
  29. {flwr_nightly-1.12.0.dev20241006.dist-info → flwr_nightly-1.12.0.dev20241008.dist-info}/LICENSE +0 -0
  30. {flwr_nightly-1.12.0.dev20241006.dist-info → flwr_nightly-1.12.0.dev20241008.dist-info}/WHEEL +0 -0
  31. {flwr_nightly-1.12.0.dev20241006.dist-info → flwr_nightly-1.12.0.dev20241008.dist-info}/entry_points.txt +0 -0
flwr/cli/new/new.py CHANGED
@@ -240,6 +240,8 @@ def new(
         MlFramework.HUGGINGFACE.value,
         MlFramework.MLX.value,
         MlFramework.TENSORFLOW.value,
+        MlFramework.SKLEARN.value,
+        MlFramework.NUMPY.value,
     ]
     if framework_str in frameworks_with_tasks:
         files[f"{import_name}/task.py"] = {
flwr/cli/new/templates/app/code/client.jax.py.tpl CHANGED
@@ -1,9 +1,9 @@
 """$project_name: A Flower / $framework_str app."""

 import jax
-from flwr.client import NumPyClient, ClientApp
-from flwr.common import Context

+from flwr.client import ClientApp, NumPyClient
+from flwr.common import Context
 from $import_name.task import (
     evaluation,
     get_params,
@@ -17,37 +17,31 @@ from $import_name.task import (

 # Define Flower Client and client_fn
 class FlowerClient(NumPyClient):
-    def __init__(self):
+    def __init__(self, input_dim):
         self.train_x, self.train_y, self.test_x, self.test_y = load_data()
         self.grad_fn = jax.grad(loss_fn)
-        model_shape = self.train_x.shape[1:]
-
-        self.params = load_model(model_shape)
-
-    def get_parameters(self, config):
-        return get_params(self.params)
-
-    def set_parameters(self, parameters):
-        set_params(self.params, parameters)
+        self.params = load_model((input_dim,))

     def fit(self, parameters, config):
-        self.set_parameters(parameters)
+        set_params(self.params, parameters)
         self.params, loss, num_examples = train(
             self.params, self.grad_fn, self.train_x, self.train_y
         )
-        parameters = self.get_parameters(config={})
-        return parameters, num_examples, {"loss": float(loss)}
+        return get_params(self.params), num_examples, {"loss": float(loss)}

     def evaluate(self, parameters, config):
-        self.set_parameters(parameters)
+        set_params(self.params, parameters)
         loss, num_examples = evaluation(
             self.params, self.grad_fn, self.test_x, self.test_y
         )
         return float(loss), num_examples, {"loss": float(loss)}

+
 def client_fn(context: Context):
+    input_dim = context.run_config["input-dim"]
+
     # Return Client instance
-    return FlowerClient().to_client()
+    return FlowerClient(input_dim).to_client()


 # Flower ClientApp
flwr/cli/new/templates/app/code/client.mlx.py.tpl CHANGED
@@ -3,17 +3,18 @@
 import mlx.core as mx
 import mlx.nn as nn
 import mlx.optimizers as optim
-from flwr.client import NumPyClient, ClientApp
-from flwr.common import Context

+from flwr.client import ClientApp, NumPyClient
+from flwr.common import Context
+from flwr.common.config import UserConfig
 from $import_name.task import (
+    MLP,
     batch_iterate,
     eval_fn,
     get_params,
     load_data,
     loss_fn,
     set_params,
-    MLP,
 )


@@ -22,37 +23,24 @@ class FlowerClient(NumPyClient):
     def __init__(
         self,
         data,
-        num_layers,
-        hidden_dim,
+        run_config: UserConfig,
         num_classes,
-        batch_size,
-        learning_rate,
-        num_epochs,
     ):
-        self.num_layers = num_layers
-        self.hidden_dim = hidden_dim
-        self.num_classes = num_classes
-        self.batch_size = batch_size
-        self.learning_rate = learning_rate
-        self.num_epochs = num_epochs
+        num_layers = run_config["num-layers"]
+        hidden_dim = run_config["hidden-dim"]
+        input_dim = run_config["input-dim"]
+        batch_size = run_config["batch-size"]
+        learning_rate = run_config["lr"]
+        self.num_epochs = run_config["local-epochs"]

         self.train_images, self.train_labels, self.test_images, self.test_labels = data
-        self.model = MLP(
-            num_layers, self.train_images.shape[-1], hidden_dim, num_classes
-        )
+        self.model = MLP(num_layers, input_dim, hidden_dim, num_classes)
         self.optimizer = optim.SGD(learning_rate=learning_rate)
         self.loss_and_grad_fn = nn.value_and_grad(self.model, loss_fn)
-        self.num_epochs = num_epochs
         self.batch_size = batch_size

-    def get_parameters(self, config):
-        return get_params(self.model)
-
-    def set_parameters(self, parameters):
-        set_params(self.model, parameters)
-
     def fit(self, parameters, config):
-        self.set_parameters(parameters)
+        set_params(self.model, parameters)
         for _ in range(self.num_epochs):
             for X, y in batch_iterate(
                 self.batch_size, self.train_images, self.train_labels
@@ -60,10 +48,10 @@ class FlowerClient(NumPyClient):
                 _, grads = self.loss_and_grad_fn(self.model, X, y)
                 self.optimizer.update(self.model, grads)
                 mx.eval(self.model.parameters(), self.optimizer.state)
-        return self.get_parameters(config={}), len(self.train_images), {}
+        return get_params(self.model), len(self.train_images), {}

     def evaluate(self, parameters, config):
-        self.set_parameters(parameters)
+        set_params(self.model, parameters)
         accuracy = eval_fn(self.model, self.test_images, self.test_labels)
         loss = loss_fn(self.model, self.test_images, self.test_labels)
         return loss.item(), len(self.test_images), {"accuracy": accuracy.item()}
@@ -73,18 +61,10 @@ def client_fn(context: Context):
     partition_id = context.node_config["partition-id"]
     num_partitions = context.node_config["num-partitions"]
     data = load_data(partition_id, num_partitions)
-
-    num_layers = context.run_config["num-layers"]
-    hidden_dim = context.run_config["hidden-dim"]
     num_classes = 10
-    batch_size = context.run_config["batch-size"]
-    learning_rate = context.run_config["lr"]
-    num_epochs = context.run_config["local-epochs"]

     # Return Client instance
-    return FlowerClient(
-        data, num_layers, hidden_dim, num_classes, batch_size, learning_rate, num_epochs
-    ).to_client()
+    return FlowerClient(data, context.run_config, num_classes).to_client()


 # Flower ClientApp
flwr/cli/new/templates/app/code/client.numpy.py.tpl CHANGED
@@ -1,16 +1,15 @@
 """$project_name: A Flower / $framework_str app."""

-from flwr.client import NumPyClient, ClientApp
+from flwr.client import ClientApp, NumPyClient
 from flwr.common import Context
-import numpy as np
+from $import_name.task import get_dummy_model


 class FlowerClient(NumPyClient):
-    def get_parameters(self, config):
-        return [np.ones((1, 1))]

     def fit(self, parameters, config):
-        return ([np.ones((1, 1))], 1, {})
+        model = get_dummy_model()
+        return [model], 1, {}

     def evaluate(self, parameters, config):
         return float(0.0), 1, {"accuracy": float(1.0)}
flwr/cli/new/templates/app/code/client.pytorch.py.tpl CHANGED
@@ -1,17 +1,10 @@
 """$project_name: A Flower / $framework_str app."""

 import torch
-from flwr.client import NumPyClient, ClientApp
-from flwr.common import Context

-from $import_name.task import (
-    Net,
-    load_data,
-    get_weights,
-    set_weights,
-    train,
-    test,
-)
+from flwr.client import ClientApp, NumPyClient
+from flwr.common import Context
+from $import_name.task import Net, get_weights, load_data, set_weights, test, train


 # Define Flower Client and client_fn
@@ -32,7 +25,11 @@ class FlowerClient(NumPyClient):
             self.local_epochs,
             self.device,
         )
-        return get_weights(self.net), len(self.trainloader.dataset), {"train_loss": train_loss}
+        return (
+            get_weights(self.net),
+            len(self.trainloader.dataset),
+            {"train_loss": train_loss},
+        )

     def evaluate(self, parameters, config):
         set_weights(self.net, parameters)
flwr/cli/new/templates/app/code/client.sklearn.py.tpl CHANGED
@@ -2,40 +2,17 @@

 import warnings

-import numpy as np
-from flwr.client import NumPyClient, ClientApp
-from flwr.common import Context
-from flwr_datasets import FederatedDataset
-from sklearn.linear_model import LogisticRegression
 from sklearn.metrics import log_loss

-
-def get_model_parameters(model):
-    if model.fit_intercept:
-        params = [
-            model.coef_,
-            model.intercept_,
-        ]
-    else:
-        params = [model.coef_]
-    return params
-
-
-def set_model_params(model, params):
-    model.coef_ = params[0]
-    if model.fit_intercept:
-        model.intercept_ = params[1]
-    return model
-
-
-def set_initial_params(model):
-    n_classes = 10  # MNIST has 10 classes
-    n_features = 784  # Number of features in dataset
-    model.classes_ = np.array([i for i in range(10)])
-
-    model.coef_ = np.zeros((n_classes, n_features))
-    if model.fit_intercept:
-        model.intercept_ = np.zeros((n_classes,))
+from flwr.client import ClientApp, NumPyClient
+from flwr.common import Context
+from $import_name.task import (
+    get_model,
+    get_model_params,
+    load_data,
+    set_initial_params,
+    set_model_params,
+)


 class FlowerClient(NumPyClient):
@@ -46,9 +23,6 @@ class FlowerClient(NumPyClient):
         self.y_train = y_train
         self.y_test = y_test

-    def get_parameters(self, config):
-        return get_model_parameters(self.model)
-
     def fit(self, parameters, config):
         set_model_params(self.model, parameters)

@@ -57,7 +31,7 @@ class FlowerClient(NumPyClient):
             warnings.simplefilter("ignore")
             self.model.fit(self.X_train, self.y_train)

-        return get_model_parameters(self.model), len(self.X_train), {}
+        return get_model_params(self.model), len(self.X_train), {}

     def evaluate(self, parameters, config):
         set_model_params(self.model, parameters)
@@ -71,21 +45,13 @@ class FlowerClient(NumPyClient):
 def client_fn(context: Context):
     partition_id = context.node_config["partition-id"]
     num_partitions = context.node_config["num-partitions"]
-    fds = FederatedDataset(dataset="mnist", partitioners={"train": num_partitions})
-    dataset = fds.load_partition(partition_id, "train").with_format("numpy")
-
-    X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"]

-    # Split the on edge data: 80% train, 20% test
-    X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :]
-    y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :]
+    X_train, X_test, y_train, y_test = load_data(partition_id, num_partitions)

     # Create LogisticRegression Model
-    model = LogisticRegression(
-        penalty="l2",
-        max_iter=1,  # local epoch
-        warm_start=True,  # prevent refreshing weights when fitting
-    )
+    penalty = context.run_config["penalty"]
+    local_epochs = context.run_config["local-epochs"]
+    model = get_model(penalty, local_epochs)

     # Setting initial parameters, akin to model.compile for keras models
     set_initial_params(model)
flwr/cli/new/templates/app/code/server.jax.py.tpl CHANGED
@@ -1,16 +1,22 @@
 """$project_name: A Flower / $framework_str app."""

-from flwr.common import Context
-from flwr.server.strategy import FedAvg
+from flwr.common import Context, ndarrays_to_parameters
 from flwr.server import ServerApp, ServerAppComponents, ServerConfig
+from flwr.server.strategy import FedAvg
+from $import_name.task import get_params, load_model


 def server_fn(context: Context):
     # Read from config
     num_rounds = context.run_config["num-server-rounds"]
+    input_dim = context.run_config["input-dim"]
+
+    # Initialize global model
+    params = get_params(load_model((input_dim,)))
+    initial_parameters = ndarrays_to_parameters(params)

     # Define strategy
-    strategy = FedAvg()
+    strategy = FedAvg(initial_parameters=initial_parameters)
     config = ServerConfig(num_rounds=num_rounds)

     return ServerAppComponents(strategy=strategy, config=config)
flwr/cli/new/templates/app/code/server.mlx.py.tpl CHANGED
@@ -1,16 +1,27 @@
 """$project_name: A Flower / $framework_str app."""

-from flwr.common import Context
+from flwr.common import Context, ndarrays_to_parameters
 from flwr.server import ServerApp, ServerAppComponents, ServerConfig
 from flwr.server.strategy import FedAvg
+from $import_name.task import MLP, get_params


 def server_fn(context: Context):
     # Read from config
     num_rounds = context.run_config["num-server-rounds"]

+    num_classes = 10
+    num_layers = context.run_config["num-layers"]
+    input_dim = context.run_config["input-dim"]
+    hidden_dim = context.run_config["hidden-dim"]
+
+    # Initialize global model
+    model = MLP(num_layers, input_dim, hidden_dim, num_classes)
+    params = get_params(model)
+    initial_parameters = ndarrays_to_parameters(params)
+
     # Define strategy
-    strategy = FedAvg()
+    strategy = FedAvg(initial_parameters=initial_parameters)
     config = ServerConfig(num_rounds=num_rounds)

     return ServerAppComponents(strategy=strategy, config=config)
flwr/cli/new/templates/app/code/server.numpy.py.tpl CHANGED
@@ -1,16 +1,21 @@
 """$project_name: A Flower / $framework_str app."""

-from flwr.common import Context
+from flwr.common import Context, ndarrays_to_parameters
 from flwr.server import ServerApp, ServerAppComponents, ServerConfig
 from flwr.server.strategy import FedAvg
+from $import_name.task import get_dummy_model


 def server_fn(context: Context):
     # Read from config
     num_rounds = context.run_config["num-server-rounds"]

+    # Initial model
+    model = get_dummy_model()
+    dummy_parameters = ndarrays_to_parameters([model])
+
     # Define strategy
-    strategy = FedAvg()
+    strategy = FedAvg(initial_parameters=dummy_parameters)
     config = ServerConfig(num_rounds=num_rounds)

     return ServerAppComponents(strategy=strategy, config=config)
flwr/cli/new/templates/app/code/server.pytorch.py.tpl CHANGED
@@ -3,7 +3,6 @@
 from flwr.common import Context, ndarrays_to_parameters
 from flwr.server import ServerApp, ServerAppComponents, ServerConfig
 from flwr.server.strategy import FedAvg
-
 from $import_name.task import Net, get_weights


@@ -27,5 +26,6 @@ def server_fn(context: Context):

     return ServerAppComponents(strategy=strategy, config=config)

+
 # Create ServerApp
 app = ServerApp(server_fn=server_fn)
flwr/cli/new/templates/app/code/server.sklearn.py.tpl CHANGED
@@ -1,19 +1,31 @@
 """$project_name: A Flower / $framework_str app."""

-from flwr.common import Context
+from flwr.common import Context, ndarrays_to_parameters
 from flwr.server import ServerApp, ServerAppComponents, ServerConfig
 from flwr.server.strategy import FedAvg
+from $import_name.task import get_model, get_model_params, set_initial_params


 def server_fn(context: Context):
     # Read from config
     num_rounds = context.run_config["num-server-rounds"]

+    # Create LogisticRegression Model
+    penalty = context.run_config["penalty"]
+    local_epochs = context.run_config["local-epochs"]
+    model = get_model(penalty, local_epochs)
+
+    # Setting initial parameters, akin to model.compile for keras models
+    set_initial_params(model)
+
+    initial_parameters = ndarrays_to_parameters(get_model_params(model))
+
     # Define strategy
     strategy = FedAvg(
         fraction_fit=1.0,
         fraction_evaluate=1.0,
         min_available_clients=2,
+        initial_parameters=initial_parameters,
     )
     config = ServerConfig(num_rounds=num_rounds)

flwr/cli/new/templates/app/code/task.jax.py.tpl CHANGED
@@ -2,9 +2,9 @@

 import jax
 import jax.numpy as jnp
+import numpy as np
 from sklearn.datasets import make_regression
 from sklearn.model_selection import train_test_split
-import numpy as np

 key = jax.random.PRNGKey(0)

@@ -33,7 +33,7 @@ def train(params, grad_fn, X, y):
     num_examples = X.shape[0]
     for epochs in range(50):
         grads = grad_fn(params, X, y)
-        params = jax.tree_map(lambda p, g: p - 0.05 * g, params, grads)
+        params = jax.tree.map(lambda p, g: p - 0.05 * g, params, grads)
         loss = loss_fn(params, X, y)
     return params, loss, num_examples

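Context for the change above: recent JAX releases (including the 0.4.30 pin introduced further down in pyproject.jax.toml.tpl) expose the pytree helpers under jax.tree, and jax.tree_map is deprecated in favour of jax.tree.map. A minimal, self-contained sketch of the updated call; the parameter values below are made up for illustration and are not part of the diff:

import jax

params = {"w": 1.0, "b": 2.0}
grads = {"w": 0.5, "b": 0.1}

# Apply an SGD-style update leaf-wise over the parameter pytree
new_params = jax.tree.map(lambda p, g: p - 0.05 * g, params, grads)
print(new_params)  # w -> 0.975, b -> 1.995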
flwr/cli/new/templates/app/code/task.mlx.py.tpl CHANGED
@@ -3,10 +3,10 @@
 import mlx.core as mx
 import mlx.nn as nn
 import numpy as np
-from datasets.utils.logging import disable_progress_bar
 from flwr_datasets import FederatedDataset
 from flwr_datasets.partitioner import IidPartitioner

+from datasets.utils.logging import disable_progress_bar

 disable_progress_bar()

flwr/cli/new/templates/app/code/task.numpy.py.tpl ADDED
@@ -0,0 +1,7 @@
+"""$project_name: A Flower / $framework_str app."""
+
+import numpy as np
+
+
+def get_dummy_model():
+    return np.ones((1, 1))
flwr/cli/new/templates/app/code/task.pytorch.py.tpl CHANGED
@@ -5,10 +5,10 @@ from collections import OrderedDict
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from torch.utils.data import DataLoader
-from torchvision.transforms import Compose, Normalize, ToTensor
 from flwr_datasets import FederatedDataset
 from flwr_datasets.partitioner import IidPartitioner
+from torch.utils.data import DataLoader
+from torchvision.transforms import Compose, Normalize, ToTensor


 class Net(nn.Module):
@@ -67,7 +67,7 @@ def train(net, trainloader, epochs, device):
     """Train the model on the training set."""
     net.to(device)  # move model to GPU if available
     criterion = torch.nn.CrossEntropyLoss().to(device)
-    optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
+    optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
     net.train()
     running_loss = 0.0
     for _ in range(epochs):
flwr/cli/new/templates/app/code/task.sklearn.py.tpl ADDED
@@ -0,0 +1,67 @@
+"""$project_name: A Flower / $framework_str app."""
+
+import numpy as np
+from flwr_datasets import FederatedDataset
+from flwr_datasets.partitioner import IidPartitioner
+from sklearn.linear_model import LogisticRegression
+
+fds = None  # Cache FederatedDataset
+
+
+def load_data(partition_id: int, num_partitions: int):
+    """Load partition MNIST data."""
+    # Only initialize `FederatedDataset` once
+    global fds
+    if fds is None:
+        partitioner = IidPartitioner(num_partitions=num_partitions)
+        fds = FederatedDataset(
+            dataset="mnist",
+            partitioners={"train": partitioner},
+        )
+
+    dataset = fds.load_partition(partition_id, "train").with_format("numpy")
+
+    X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"]
+
+    # Split the on edge data: 80% train, 20% test
+    X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :]
+    y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :]
+
+    return X_train, X_test, y_train, y_test
+
+
+def get_model(penalty: str, local_epochs: int):
+
+    return LogisticRegression(
+        penalty=penalty,
+        max_iter=local_epochs,
+        warm_start=True,
+    )
+
+
+def get_model_params(model):
+    if model.fit_intercept:
+        params = [
+            model.coef_,
+            model.intercept_,
+        ]
+    else:
+        params = [model.coef_]
+    return params
+
+
+def set_model_params(model, params):
+    model.coef_ = params[0]
+    if model.fit_intercept:
+        model.intercept_ = params[1]
+    return model
+
+
+def set_initial_params(model):
+    n_classes = 10  # MNIST has 10 classes
+    n_features = 784  # Number of features in dataset
+    model.classes_ = np.array([i for i in range(10)])
+
+    model.coef_ = np.zeros((n_classes, n_features))
+    if model.fit_intercept:
+        model.intercept_ = np.zeros((n_classes,))
flwr/cli/new/templates/app/pyproject.jax.toml.tpl CHANGED
@@ -9,8 +9,8 @@ description = ""
 license = "Apache-2.0"
 dependencies = [
     "flwr[simulation]>=1.10.0",
-    "jax==0.4.13",
-    "jaxlib==0.4.13",
+    "jax==0.4.30",
+    "jaxlib==0.4.30",
     "scikit-learn==1.3.2",
 ]

@@ -26,6 +26,7 @@ clientapp = "$import_name.client_app:app"

 [tool.flwr.app.config]
 num-server-rounds = 3
+input-dim = 3

 [tool.flwr.federations]
 default = "local-simulation"
flwr/cli/new/templates/app/pyproject.mlx.toml.tpl CHANGED
@@ -28,6 +28,7 @@ clientapp = "$import_name.client_app:app"
 num-server-rounds = 3
 local-epochs = 1
 num-layers = 2
+input-dim = 784 # 28*28
 hidden-dim = 32
 batch-size = 256
 lr = 0.1
flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl CHANGED
@@ -25,6 +25,8 @@ clientapp = "$import_name.client_app:app"

 [tool.flwr.app.config]
 num-server-rounds = 3
+penalty = "l2"
+local-epochs = 1

 [tool.flwr.federations]
 default = "local-simulation"
flwr/common/constant.py CHANGED
@@ -63,7 +63,10 @@ NODE_ID_NUM_BYTES = 8

 # Constants for FAB
 APP_DIR = "apps"
+FAB_ALLOWED_EXTENSIONS = {".py", ".toml", ".md"}
 FAB_CONFIG_FILE = "pyproject.toml"
+FAB_DATE = (2024, 10, 1, 0, 0, 0)
+FAB_HASH_TRUNCATION = 8
 FLWR_HOME = "FLWR_HOME"

 # Constants entries in Node config for Simulation
@@ -78,6 +81,9 @@ GRPC_ADAPTER_METADATA_SHOULD_EXIT_KEY = "should-exit"
 GRPC_ADAPTER_METADATA_MESSAGE_MODULE_KEY = "grpc-message-module"
 GRPC_ADAPTER_METADATA_MESSAGE_QUALNAME_KEY = "grpc-message-qualname"

+# Message TTL
+MESSAGE_TTL_TOLERANCE = 1e-1
+

 class MessageType:
     """Message type."""
flwr/common/message.py CHANGED
@@ -17,9 +17,11 @@
 from __future__ import annotations

 import time
-import warnings
+from logging import WARNING
 from typing import Optional, cast

+from .constant import MESSAGE_TTL_TOLERANCE
+from .logger import log
 from .record import RecordSet

 DEFAULT_TTL = 3600
@@ -289,13 +291,6 @@ class Message:

         ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)
         """
-        if ttl:
-            warnings.warn(
-                "A custom TTL was set, but note that the SuperLink does not enforce "
-                "the TTL yet. The SuperLink will start enforcing the TTL in a future "
-                "version of Flower.",
-                stacklevel=2,
-            )

         # If no TTL passed, use default for message creation (will update after
         # message creation)
@@ -309,6 +304,8 @@ class Message:
         )
         message.metadata.ttl = ttl

+        self._limit_task_res_ttl(message)
+
         return message

     def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message:
@@ -334,13 +331,6 @@ class Message:
         Message
             A new `Message` instance representing the reply.
         """
-        if ttl:
-            warnings.warn(
-                "A custom TTL was set, but note that the SuperLink does not enforce "
-                "the TTL yet. The SuperLink will start enforcing the TTL in a future "
-                "version of Flower.",
-                stacklevel=2,
-            )

         # If no TTL passed, use default for message creation (will update after
         # message creation)
@@ -357,6 +347,8 @@ class Message:
         )
         message.metadata.ttl = ttl

+        self._limit_task_res_ttl(message)
+
         return message

     def __repr__(self) -> str:
@@ -370,6 +362,31 @@ class Message:
         )
         return f"{self.__class__.__qualname__}({view})"

+    def _limit_task_res_ttl(self, message: Message) -> None:
+        """Limit the TaskRes TTL to not exceed the expiration time of the TaskIns it
+        replies to.
+
+        Parameters
+        ----------
+        message : Message
+            The message to which the TaskRes is replying.
+        """
+        # Calculate the maximum allowed TTL
+        max_allowed_ttl = (
+            self.metadata.created_at + self.metadata.ttl - message.metadata.created_at
+        )
+
+        if message.metadata.ttl - max_allowed_ttl > MESSAGE_TTL_TOLERANCE:
+            log(
+                WARNING,
+                "The reply TTL of %.2f seconds exceeded the "
+                "allowed maximum of %.2f seconds. "
+                "The TTL has been updated to the allowed maximum.",
+                message.metadata.ttl,
+                max_allowed_ttl,
+            )
+            message.metadata.ttl = max_allowed_ttl
+

 def _create_reply_metadata(msg: Message, ttl: float) -> Metadata:
     """Construct metadata for a reply message."""
flwr/server/compat/driver_client_proxy.py CHANGED
@@ -15,7 +15,6 @@
 """Flower ClientProxy implementation for Driver API."""


-import time
 from typing import Optional

 from flwr import common
@@ -25,8 +24,6 @@ from flwr.server.client_proxy import ClientProxy

 from ..driver.driver import Driver

-SLEEP_TIME = 1
-

 class DriverClientProxy(ClientProxy):
     """Flower client proxy which delegates work using the Driver API."""
@@ -122,29 +119,18 @@ class DriverClientProxy(ClientProxy):
             ttl=timeout,
         )

-        # Push message
-        message_ids = list(self.driver.push_messages(messages=[message]))
-        if len(message_ids) != 1:
-            raise ValueError("Unexpected number of message_ids")
-
-        message_id = message_ids[0]
-        if message_id == "":
-            raise ValueError(f"Failed to send message to node {self.node_id}")
-
-        if timeout:
-            start_time = time.time()
-
-        while True:
-            messages = list(self.driver.pull_messages(message_ids))
-            if len(messages) == 1:
-                msg: Message = messages[0]
-                if msg.has_error():
-                    raise ValueError(
-                        f"Message contains an Error (reason: {msg.error.reason}). "
-                        "It originated during client-side execution of a message."
-                    )
-                return msg.content
-
-            if timeout is not None and time.time() > start_time + timeout:
-                raise RuntimeError("Timeout reached")
-            time.sleep(SLEEP_TIME)
+        # Send message and wait for reply
+        messages = list(self.driver.send_and_receive(messages=[message]))
+
+        # A single reply is expected
+        if len(messages) != 1:
+            raise ValueError(f"Expected one Message but got: {len(messages)}")
+
+        # Only messages without errors can be handled beyond these point
+        msg: Message = messages[0]
+        if msg.has_error():
+            raise ValueError(
+                f"Message contains an Error (reason: {msg.error.reason}). "
+                "It originated during client-side execution of a message."
+            )
+        return msg.content
flwr/server/driver/inmemory_driver.py CHANGED
@@ -39,16 +39,20 @@ class InMemoryDriver(Driver):
         The identifier of the run.
     state_factory : StateFactory
         A StateFactory embedding a state that this driver can interface with.
+    pull_interval : float (default=0.1)
+        Sleep duration between calls to `pull_messages`.
     """

     def __init__(
         self,
         run_id: int,
         state_factory: StateFactory,
+        pull_interval: float = 0.1,
     ) -> None:
         self._run_id = run_id
         self._run: Optional[Run] = None
         self.state = state_factory.state()
+        self.pull_interval = pull_interval
         self.node = Node(node_id=0, anonymous=True)

     def _check_message(self, message: Message) -> None:
@@ -180,5 +184,5 @@ class InMemoryDriver(Driver):
             if len(msg_ids) == 0:
                 break
             # Sleep
-            time.sleep(3)
+            time.sleep(self.pull_interval)
         return ret
flwr/server/strategy/aggregate.py CHANGED
@@ -15,8 +15,8 @@
 """Aggregation functions for strategy implementations."""
 # mypy: disallow_untyped_calls=False

-from functools import reduce
-from typing import Any, Callable
+from functools import partial, reduce
+from typing import Any, Callable, Union

 import numpy as np

@@ -52,17 +52,31 @@ def aggregate_inplace(results: list[tuple[ClientProxy, FitRes]]) -> NDArrays:
         fit_res.num_examples / num_examples_total for _, fit_res in results
     ]

+    def _try_inplace(
+        x: NDArray, y: Union[NDArray, float], np_binary_op: np.ufunc
+    ) -> NDArray:
+        return (  # type: ignore[no-any-return]
+            np_binary_op(x, y, out=x)
+            if np.can_cast(y, x.dtype, casting="same_kind")
+            else np_binary_op(x, np.array(y, x.dtype), out=x)
+        )
+
     # Let's do in-place aggregation
     # Get first result, then add up each other
     params = [
-        scaling_factors[0] * x for x in parameters_to_ndarrays(results[0][1].parameters)
+        _try_inplace(x, scaling_factors[0], np_binary_op=np.multiply)
+        for x in parameters_to_ndarrays(results[0][1].parameters)
     ]
-    for i, (_, fit_res) in enumerate(results[1:]):
+
+    for i, (_, fit_res) in enumerate(results[1:], start=1):
         res = (
-            scaling_factors[i + 1] * x
+            _try_inplace(x, scaling_factors[i], np_binary_op=np.multiply)
             for x in parameters_to_ndarrays(fit_res.parameters)
         )
-        params = [reduce(np.add, layer_updates) for layer_updates in zip(params, res)]
+        params = [
+            reduce(partial(_try_inplace, np_binary_op=np.add), layer_updates)
+            for layer_updates in zip(params, res)
+        ]

     return params

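For context, the in-place `aggregate_inplace` rewrite above computes the same example-weighted average as the straightforward version sketched below; the in-place variant avoids allocating intermediate arrays and casts each scaling factor to the layer's dtype when a "same_kind" cast is not possible. The sketch is illustrative only, with made-up client data:

import numpy as np

def naive_weighted_average(layers_per_client, num_examples):
    # Plain FedAvg: weight each client's layers by its share of the training examples
    total = sum(num_examples)
    scaling = [n / total for n in num_examples]
    return [
        sum(w * layer for w, layer in zip(scaling, client_layers))
        for client_layers in zip(*layers_per_client)
    ]

# Two hypothetical clients, each holding two float32 "layers"
client_a = [np.ones((2, 2), dtype=np.float32), np.zeros(3, dtype=np.float32)]
client_b = [np.full((2, 2), 3.0, dtype=np.float32), np.ones(3, dtype=np.float32)]
print(naive_weighted_average([client_a, client_b], num_examples=[1, 3]))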
@@ -128,7 +142,7 @@ def aggregate_bulyan(

     Parameters
     ----------
-    results: List[Tuple[NDArrays, int]]
+    results: list[tuple[NDArrays, int]]
         Weights and number of samples for each of the client.
     num_malicious: int
         The maximum number of malicious clients.
@@ -332,7 +346,7 @@ def _aggregate_n_closest_weights(
     ----------
     reference_weights: NDArrays
         The weights from which the distances will be computed
-    results: List[Tuple[NDArrays, int]]
+    results: list[tuple[NDArrays, int]]
         The weights from models
     beta_closest: int
         The number of the closest distance weights that will be averaged
flwr/server/superlink/state/in_memory_state.py CHANGED
@@ -17,12 +17,16 @@

 import threading
 import time
-from logging import ERROR
+from logging import ERROR, WARNING
 from typing import Optional
 from uuid import UUID, uuid4

 from flwr.common import log, now
-from flwr.common.constant import NODE_ID_NUM_BYTES, RUN_ID_NUM_BYTES
+from flwr.common.constant import (
+    MESSAGE_TTL_TOLERANCE,
+    NODE_ID_NUM_BYTES,
+    RUN_ID_NUM_BYTES,
+)
 from flwr.common.typing import Run, UserConfig
 from flwr.proto.task_pb2 import TaskIns, TaskRes  # pylint: disable=E0611
 from flwr.server.superlink.state.state import State
@@ -83,6 +87,7 @@ class InMemoryState(State):  # pylint: disable=R0902,R0904

         # Find TaskIns for node_id that were not delivered yet
         task_ins_list: list[TaskIns] = []
+        current_time = time.time()
         with self.lock:
             for _, task_ins in self.task_ins_store.items():
                 # pylint: disable=too-many-boolean-expressions
@@ -91,11 +96,13 @@ class InMemoryState(State):  # pylint: disable=R0902,R0904
                     and task_ins.task.consumer.anonymous is False
                     and task_ins.task.consumer.node_id == node_id
                     and task_ins.task.delivered_at == ""
+                    and task_ins.task.created_at + task_ins.task.ttl > current_time
                 ) or (
                     node_id is None  # Anonymous
                     and task_ins.task.consumer.anonymous is True
                     and task_ins.task.consumer.node_id == 0
                     and task_ins.task.delivered_at == ""
+                    and task_ins.task.created_at + task_ins.task.ttl > current_time
                 ):
                     task_ins_list.append(task_ins)
                     if limit and len(task_ins_list) == limit:
@@ -134,6 +141,27 @@ class InMemoryState(State):  # pylint: disable=R0902,R0904
             )
             return None

+        # Fail if the TaskRes TTL exceeds the
+        # expiration time of the TaskIns it replies to.
+        # Condition: TaskIns.created_at + TaskIns.ttl ≥
+        #            TaskRes.created_at + TaskRes.ttl
+        # A small tolerance is introduced to account
+        # for floating-point precision issues.
+        max_allowed_ttl = (
+            task_ins.task.created_at + task_ins.task.ttl - task_res.task.created_at
+        )
+        if task_res.task.ttl and (
+            task_res.task.ttl - max_allowed_ttl > MESSAGE_TTL_TOLERANCE
+        ):
+            log(
+                WARNING,
+                "Received TaskRes with TTL %.2f "
+                "exceeding the allowed maximum TTL %.2f.",
+                task_res.task.ttl,
+                max_allowed_ttl,
+            )
+            return None
+
         # Validate run_id
         if task_res.run_id not in self.run_ids:
             log(ERROR, "`run_id` is invalid")
flwr/server/superlink/state/sqlite_state.py CHANGED
@@ -20,12 +20,16 @@ import re
 import sqlite3
 import time
 from collections.abc import Sequence
-from logging import DEBUG, ERROR
+from logging import DEBUG, ERROR, WARNING
 from typing import Any, Optional, Union, cast
 from uuid import UUID, uuid4

 from flwr.common import log, now
-from flwr.common.constant import NODE_ID_NUM_BYTES, RUN_ID_NUM_BYTES
+from flwr.common.constant import (
+    MESSAGE_TTL_TOLERANCE,
+    NODE_ID_NUM_BYTES,
+    RUN_ID_NUM_BYTES,
+)
 from flwr.common.typing import Run, UserConfig
 from flwr.proto.node_pb2 import Node  # pylint: disable=E0611
 from flwr.proto.recordset_pb2 import RecordSet  # pylint: disable=E0611
@@ -295,6 +299,7 @@ class SqliteState(State):  # pylint: disable=R0904
                 WHERE consumer_anonymous == 1
                 AND consumer_node_id == 0
                 AND delivered_at = ""
+                AND (created_at + ttl) > CAST(strftime('%s', 'now') AS REAL)
             """
         else:
             # Convert the uint64 value to sint64 for SQLite
@@ -307,6 +312,7 @@ class SqliteState(State):  # pylint: disable=R0904
                 WHERE consumer_anonymous == 0
                 AND consumer_node_id == :node_id
                 AND delivered_at = ""
+                AND (created_at + ttl) > CAST(strftime('%s', 'now') AS REAL)
             """

         if limit is not None:
@@ -383,6 +389,27 @@ class SqliteState(State):  # pylint: disable=R0904
             )
             return None

+        # Fail if the TaskRes TTL exceeds the
+        # expiration time of the TaskIns it replies to.
+        # Condition: TaskIns.created_at + TaskIns.ttl ≥
+        #            TaskRes.created_at + TaskRes.ttl
+        # A small tolerance is introduced to account
+        # for floating-point precision issues.
+        max_allowed_ttl = (
+            task_ins["created_at"] + task_ins["ttl"] - task_res.task.created_at
+        )
+        if task_res.task.ttl and (
+            task_res.task.ttl - max_allowed_ttl > MESSAGE_TTL_TOLERANCE
+        ):
+            log(
+                WARNING,
+                "Received TaskRes with TTL %.2f "
+                "exceeding the allowed maximum TTL %.2f.",
+                task_res.task.ttl,
+                max_allowed_ttl,
+            )
+            return None
+
         # Store TaskRes
         task_res.task_id = str(task_id)
         data = (task_res_to_dict(task_res),)
{flwr_nightly-1.12.0.dev20241006.dist-info → flwr_nightly-1.12.0.dev20241008.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: flwr-nightly
-Version: 1.12.0.dev20241006
+Version: 1.12.0.dev20241008
 Summary: Flower: A Friendly Federated Learning Framework
 Home-page: https://flower.ai
 License: Apache-2.0
{flwr_nightly-1.12.0.dev20241006.dist-info → flwr_nightly-1.12.0.dev20241008.dist-info}/RECORD RENAMED
@@ -7,7 +7,7 @@ flwr/cli/example.py,sha256=1bGDYll3BXQY2kRqSN-oICqS5n1b9m0g0RvXTopXHl4,2215
 flwr/cli/install.py,sha256=t5tdeKOsTmG3nuInUoSKBVzUU1RnzA096yzYs013VhE,7065
 flwr/cli/log.py,sha256=uhtcLcFGkazirWnEmet3Wt3rt_q-a13kauQqPLaMaRY,8097
 flwr/cli/new/__init__.py,sha256=cQzK1WH4JP2awef1t2UQ2xjl1agVEz9rwutV18SWV1k,789
-flwr/cli/new/new.py,sha256=wpHBmHOq6X04CPwJDaEgu3H5_MsfoEYsYsv3E-EDhzM,9558
+flwr/cli/new/new.py,sha256=uSiG7aXQzPDnikv2YcjQ86OOLqint0hNWCI0fSQD0jI,9634
 flwr/cli/new/templates/__init__.py,sha256=4luU8RL-CK8JJCstQ_ON809W9bNTkY1l9zSaPKBkgwY,725
 flwr/cli/new/templates/app/.gitignore.tpl,sha256=XixnHdyeMB2vwkGtGnwHqoWpH-9WChdyG0GXe57duhc,3078
 flwr/cli/new/templates/app/LICENSE.tpl,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
@@ -20,11 +20,11 @@ flwr/cli/new/templates/app/code/__init__.py,sha256=EM6vfvgAILKPaPn7H1wMV1Wi01WyZ
 flwr/cli/new/templates/app/code/__init__.py.tpl,sha256=J0Gn74E7khpLyKJVNqOPu7ev93vkcu1PZugsbxtABMw,52
 flwr/cli/new/templates/app/code/client.baseline.py.tpl,sha256=1htktXX3jXX05r0vuG_afjS1sXGtuONW9EpiQ7vSBes,1901
 flwr/cli/new/templates/app/code/client.huggingface.py.tpl,sha256=ifD08KwjdoGieV26hFCgf3PQB6rMhj_NZLo5iUUndm8,1846
-flwr/cli/new/templates/app/code/client.jax.py.tpl,sha256=c2LDew2V8BUybZJiz1FeB3Kq4ey0Q2s0S5qNPUTNmI4,1490
-flwr/cli/new/templates/app/code/client.mlx.py.tpl,sha256=gxipt57ldc741qwRqSWtsLQH05JODKdGMTtvoXiBzDA,2906
-flwr/cli/new/templates/app/code/client.numpy.py.tpl,sha256=DMUXvQd2dr-wEn0ZrYJQhZ0OFUT4PKoHXtiD2haWnCI,570
-flwr/cli/new/templates/app/code/client.pytorch.py.tpl,sha256=WczaR5avJUhfw2Grn2KEC4tDJ4voIYG-2pAy-7i2cT8,1685
-flwr/cli/new/templates/app/code/client.sklearn.py.tpl,sha256=xW9cuKhybk5S8IeDZhbeb0DNegDIJGEYrzMKsxgc2GE,2978
+flwr/cli/new/templates/app/code/client.jax.py.tpl,sha256=4EkcGGmbPAa6dgw8GYII-GfrGsu8VU6amRHpJvF0WuA,1319
+flwr/cli/new/templates/app/code/client.mlx.py.tpl,sha256=gOxt_QUTfGFpofdNaxdwTSLZlkTWHPYGix2OGHC1hYE,2376
+flwr/cli/new/templates/app/code/client.numpy.py.tpl,sha256=DKcnz5-KUf693Va056QTKVofFV3ozJZutK4rQyfvRXc,548
+flwr/cli/new/templates/app/code/client.pytorch.py.tpl,sha256=fuxVmZpjHIueNy_aHWF81531vmi8DGu4CYjYDqmUwWo,1705
+flwr/cli/new/templates/app/code/client.sklearn.py.tpl,sha256=MfhMN-hayGCc3cZ1XpN0A6f67GRveI_tGbq5kjOeP0Q,1871
 flwr/cli/new/templates/app/code/client.tensorflow.py.tpl,sha256=yBiiU7B9Kf70U52cPkNs_dUpYrrTwbUi2os-PAyheaM,1680
 flwr/cli/new/templates/app/code/dataset.baseline.py.tpl,sha256=jbd_exHAk2-Blu_kVutjPO6a_dkJQWb232zxSeXIZ1k,1453
 flwr/cli/new/templates/app/code/flwr_tune/__init__.py,sha256=JgNgBtKdm1jKM9625WxappCAVUGtYAmcjKSsXJ1u3ZQ,748
@@ -36,27 +36,29 @@ flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl,sha256=BhiqRg9w1MGuU5h
 flwr/cli/new/templates/app/code/model.baseline.py.tpl,sha256=cSz6-IWsnMl7s04DW4URINiIppCIberrtE8NqK6Qz48,2571
 flwr/cli/new/templates/app/code/server.baseline.py.tpl,sha256=outx7lDXsWS8QXKWOGOiDno6eE8WL7LBD51ZkAuC3WU,1570
 flwr/cli/new/templates/app/code/server.huggingface.py.tpl,sha256=0PJmnZvR9_VPLSak1yVfkOx3dmqo6cynhY1l2s4AZrE,1158
-flwr/cli/new/templates/app/code/server.jax.py.tpl,sha256=pIdUH-LgWRAGWQYLlivMNf8XnDSNDe2cCuRjlxbRzys,529
-flwr/cli/new/templates/app/code/server.mlx.py.tpl,sha256=RqiZ0k468SOlm9dcPr-fvA8xcWv4zwDCbJfBwL7P9Us,529
-flwr/cli/new/templates/app/code/server.numpy.py.tpl,sha256=RqiZ0k468SOlm9dcPr-fvA8xcWv4zwDCbJfBwL7P9Us,529
-flwr/cli/new/templates/app/code/server.pytorch.py.tpl,sha256=DW5c8vzXCvFeIE8YIWBhoGnSdv8Ka_e5wd3F6B3xvp8,916
-flwr/cli/new/templates/app/code/server.sklearn.py.tpl,sha256=25Ae3kDqjDdBl8LwkDwye69nevd02Pk_e7F3SQKLdyk,624
+flwr/cli/new/templates/app/code/server.jax.py.tpl,sha256=IHk57syZhvO4nWVHGxE9S8f5DTxRKIrTitDufF4RhMY,828
+flwr/cli/new/templates/app/code/server.mlx.py.tpl,sha256=GAqalaI-U2uRdttNeRn75k1FzdEW3rmgT-ywuKkFdK4,988
+flwr/cli/new/templates/app/code/server.numpy.py.tpl,sha256=xbQlLCKutnOqlbLQPZsaL9WM7vnebTceiU8a0HaUcZk,740
+flwr/cli/new/templates/app/code/server.pytorch.py.tpl,sha256=gvBsGA_Jg9kAH8xTxjzTjMcvBtciuccOwQFbO7ey8tU,916
+flwr/cli/new/templates/app/code/server.sklearn.py.tpl,sha256=JoDYjPU99aKTTfjKsCtKHzMICiOR9pi8JGVBsxFpWO4,1133
 flwr/cli/new/templates/app/code/server.tensorflow.py.tpl,sha256=xMhQ7AumowgLkgUilgjVK7IbpRhPjslhVJU-vID6NY8,856
 flwr/cli/new/templates/app/code/strategy.baseline.py.tpl,sha256=YkHAgppUeD2BnBoGfVB6dEvBfjuIPGsU1gw4CiUi3qA,40
 flwr/cli/new/templates/app/code/task.huggingface.py.tpl,sha256=ua6cAhJYPUCwML20DEucM0F4ZzzsEVQLYrRvhQ7CGRE,3347
-flwr/cli/new/templates/app/code/task.jax.py.tpl,sha256=F05eg149c9icRyVNdfcLyZvAXROQ7QhfifoGw_U1dsg,1530
-flwr/cli/new/templates/app/code/task.mlx.py.tpl,sha256=jWtCULLRr_9bCIJvoTLMx037-SDl_LF8udtA1UGoXDk,2946
-flwr/cli/new/templates/app/code/task.pytorch.py.tpl,sha256=NgbPix74X1t3ybaGjqdls30vF1i5oY3L7EQExhWhN74,3812
+flwr/cli/new/templates/app/code/task.jax.py.tpl,sha256=jK03Y0HUvVFjUB-cYnvYB-WCRdr451aYklP1o6G5rx8,1530
+flwr/cli/new/templates/app/code/task.mlx.py.tpl,sha256=YxH5z4s5kOh5_9DIY9pvzqURckLDfgdanTA68_iM_Wo,2946
+flwr/cli/new/templates/app/code/task.numpy.py.tpl,sha256=MsjJK8DAvM3ex6JTfZuBVqoBKJfCCjIHMUOPkspiSQ0,124
+flwr/cli/new/templates/app/code/task.pytorch.py.tpl,sha256=XlJqA4Ix_PloO_zJLhjiN5vDj16w3I4CPVGdmbe8asE,3800
+flwr/cli/new/templates/app/code/task.sklearn.py.tpl,sha256=SeIIo0rr_6ffn4Qx2xELD18jYXCkcW__NWtYEDXCICM,1843
 flwr/cli/new/templates/app/code/task.tensorflow.py.tpl,sha256=SKXAZdgBnPpbAbJ90Rb7oQ5ilnopBx_j_JNFoUDeEAI,1732
 flwr/cli/new/templates/app/code/utils.baseline.py.tpl,sha256=YkHAgppUeD2BnBoGfVB6dEvBfjuIPGsU1gw4CiUi3qA,40
 flwr/cli/new/templates/app/pyproject.baseline.toml.tpl,sha256=4gi90W9_B1kj6rYkpvVJxhNX9Yctsv9OH6CzXP-dcE4,2666
 flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl,sha256=bYdDP0O8z741pvy1INnH4UBuP-KFvcyQt6Yo81n4frQ,1853
 flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl,sha256=CHJgkPNkJfzJhEbTe15uiV3AhOtIddQi-yofPZsCk3E,1143
-flwr/cli/new/templates/app/pyproject.jax.toml.tpl,sha256=Tq6jeGcoOKzMwWWYxMVnzMcipLURHLiW69iYlD1ywMg,659
-flwr/cli/new/templates/app/pyproject.mlx.toml.tpl,sha256=SHwYAA2qgIlOAU3Sb9BKSZcZ7O9biACg27MHexXUtDw,741
+flwr/cli/new/templates/app/pyproject.jax.toml.tpl,sha256=v1DVriLky0ow9yc0NK91_6VkxkzpPsheIxbb2c0LcYQ,673
+flwr/cli/new/templates/app/pyproject.mlx.toml.tpl,sha256=S4QDy7UXboJt60R3LE7z97_QU1idb0ob8A_N7O3cifo,765
 flwr/cli/new/templates/app/pyproject.numpy.toml.tpl,sha256=-FCi64ygMgQke3zApUt0XtkIBo3WtQoPAPhtp_FqkPE,612
 flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl,sha256=vIO1ArukTC76ogYLNmJIl25MOE_nEELj3IcTZZJjohU,710
-flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl,sha256=jk_5teoyOVM9QdBea8J-nk10S6TKw81QZiiKB54ATF0,654
+flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl,sha256=fdlIN_sip1mrbOtqpeag60Kj56aYrA-0HEq9lYZLNnM,686
 flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl,sha256=bRIvPCPvTTI4Eo5b61Rmw8WdDw3sjcohciTXgULN5l8,702
 flwr/cli/run/__init__.py,sha256=oCd6HmQDx-sqver1gecgx-uMA38BLTSiiKpl7RGNceg,789
 flwr/cli/run/run.py,sha256=tLKeWpWJqEBxkOtDKEprL4SBKxH1vHGybQxlCLnXiSQ,8386
@@ -101,7 +103,7 @@ flwr/client/typing.py,sha256=dxoTBnTMfqXr5J7G3y-uNjqxYCddvxhu89spfj4Lm2U,1048
 flwr/common/__init__.py,sha256=TVaoFEJE158aui1TPZQiJCDZX4RNHRyI8I55VC80HhI,3901
 flwr/common/address.py,sha256=7kM2Rqjw86-c8aKwAvrXerWqznnVv4TFJ62aSAeTn10,3017
 flwr/common/config.py,sha256=QjsDEDf4xsx8StJV9I80dYWbBp7aBNrJmTlAeLpQpyw,7567
-flwr/common/constant.py,sha256=m2FkF5QzFC_72hFxDioekOHwhilPKCds1axojg77oX8,3606
+flwr/common/constant.py,sha256=cUP0lErfb0s1ACnErm_T5kIks0xnEi2X5UNnzmXRSW4,3757
 flwr/common/context.py,sha256=5Bd9RCrhLkYZOVR7vr97OVhzVBHQkS1fUsYiIKTwpxU,2239
 flwr/common/date.py,sha256=OcQuwpb2HxcblTqYm6H223ufop5UZw5N_fzalbpOVzY,891
 flwr/common/differential_privacy.py,sha256=XwcJ3rWr8S8BZUocc76vLSJAXIf6OHnWkBV6-xlIRuw,6106
@@ -110,7 +112,7 @@ flwr/common/dp.py,sha256=vddkvyjV2FhRoN4VuU2LeAM1UBn7dQB8_W-Qdiveal8,1978
 flwr/common/exit_handlers.py,sha256=MracJaBeoCOC7TaXK9zCJQxhrMSx9ZtczK237qvhBpU,2806
 flwr/common/grpc.py,sha256=6Yi28JjAll19nxYJlOT9B03RN8dvJZP9zUoR3RSmxoY,2487
 flwr/common/logger.py,sha256=zAjaGrr_UWMkIdi1xG9tY764qJHIYM8LsPgMfBsyp64,8117
-flwr/common/message.py,sha256=QmFYYXA-3e9M8tGO-3NPyAI8yvdmcpdYaA_noR1DE88,13194
+flwr/common/message.py,sha256=QDq7WvzNJqynIGgGQ3ZdrWiZUQBZiNhxAX2HFTmeUcw,13671
 flwr/common/object_ref.py,sha256=5lgWqYaJR28UdFc-iirWw9YqFXMfgkOOAdfJc1AVibE,8711
 flwr/common/parameter.py,sha256=-bFAUayToYDF50FZGrBC1hQYJCQDtB2bbr3ZuVLMtdE,2095
 flwr/common/pyproject.py,sha256=EI_ovbCHGmhYrdPx0RSDi5EkFZFof-8m1PA54c0ZTjc,1385
@@ -204,13 +206,13 @@ flwr/server/client_proxy.py,sha256=4G-oTwhb45sfWLx2uZdcXD98IZwdTS6F88xe3akCdUg,2
 flwr/server/compat/__init__.py,sha256=VxnJtJyOjNFQXMNi9hIuzNlZM5n0Hj1p3aq_Pm2udw4,892
 flwr/server/compat/app.py,sha256=5vkHHm_h-4cMthvWD1GJo1ZW3eihytjGgvsgfXUK9gA,3298
 flwr/server/compat/app_utils.py,sha256=i8MseZQculltLTsRIEe4XUnmAuu3LF3WzGjYi0c-cps,3425
-flwr/server/compat/driver_client_proxy.py,sha256=BxTDo7i89VAG2tuF4x7zogSVn2bXPMr0H2H0lERzW9c,5444
+flwr/server/compat/driver_client_proxy.py,sha256=Af0bRUEVZNcCYRxt3DjpLPdvVYpTgz6LSlILtI_8DQY,5010
 flwr/server/compat/legacy_context.py,sha256=wBzBcfV6YO6IQGriM_FdJ5XZfiBBEEJdS_OdAiF47dY,1804
 flwr/server/criterion.py,sha256=ypbAexbztzGUxNen9RCHF91QeqiEQix4t4Ih3E-42MM,1061
 flwr/server/driver/__init__.py,sha256=bikRv6CjTwSvYh7tf10gziU5o2YotOWhhftz2tr3KDc,886
 flwr/server/driver/driver.py,sha256=rGLbOfLhBOn74mUHi_0CMbXqZLX8q_lXqEkcUXoL_wI,5238
 flwr/server/driver/grpc_driver.py,sha256=xd1mxRexeiIJrZw9l-urj2zEIncLT8KtNn0l8hIDYZs,9681
-flwr/server/driver/inmemory_driver.py,sha256=up5L2ux9l0pEUZO3hh8p5LufXntGL95IA4E3e7WsxqY,6465
+flwr/server/driver/inmemory_driver.py,sha256=J1pzjzNF18z_sQnez9JmrHqSIDfgn9NX6NfLj1BKrH4,6658
 flwr/server/history.py,sha256=qSb5_pPTrwofpSYGsZWzMPkl_4uJ4mJFWesxXDrEvDU,5026
 flwr/server/run_serverapp.py,sha256=SaE9hoWLCAPnRXvdAzE4Oi3QaiC8NOTrHxrIGXjgYxU,10531
 flwr/server/server.py,sha256=1ZsFEptmAV-L2vP2etNC9Ed5CLSxpuKzUFkAPQ4l5Xc,17893
@@ -218,7 +220,7 @@ flwr/server/server_app.py,sha256=1hul76ospG8L_KooK_ewn1sWPNTNYLTtZMeGNOBNruA,626
 flwr/server/server_config.py,sha256=CZaHVAsMvGLjpWVcLPkiYxgJN4xfIyAiUrCI3fETKY4,1349
 flwr/server/serverapp_components.py,sha256=-IV_CitOfrJclJj2jNdbN1Q65PyFmtKtrTIg1hc6WQw,2118
 flwr/server/strategy/__init__.py,sha256=tQer2SwjDnvgFFuJMZM-S01Z615N5XK6MaCvpm4BMU0,2836
-flwr/server/strategy/aggregate.py,sha256=RpMW6R6tPKSJqi-1CwYi0nlLnG3AMhWIt_QqKYVGkog,13455
+flwr/server/strategy/aggregate.py,sha256=iFZ8lp7PV_a2m9kywV-FK0iM33ofxavOs5TIaEQY8nU,13961
 flwr/server/strategy/bulyan.py,sha256=DDNLLlTJCHgBtij7EpDsa852GHEYjjDB1iORACZO2KE,6513
 flwr/server/strategy/dp_adaptive_clipping.py,sha256=OoGnSavjfIP6IUkt6z2t62CR3qItgrowtBy27TEdz2s,17387
 flwr/server/strategy/dp_fixed_clipping.py,sha256=ILmO_AsoMpstOAYK9L6hJpqf7zE6erdMj_SD_-hHAFk,12834
@@ -270,8 +272,8 @@ flwr/server/superlink/fleet/vce/backend/backend.py,sha256=LBAQxnbfPAphVOVIvYMj0Q
 flwr/server/superlink/fleet/vce/backend/raybackend.py,sha256=7kB3re3mR53b7E6L6DPSioTSKD3YGtS3uJsPD7Hn2Fw,7155
 flwr/server/superlink/fleet/vce/vce_api.py,sha256=cGPsjS_4SJHm8jszGjsHh8ZNk9nqWoIQwW_62yKKR1Y,12647
 flwr/server/superlink/state/__init__.py,sha256=Gj2OTFLXvA-mAjBvwuKDM3rDrVaQPcIoybSa2uskMTE,1003
-flwr/server/superlink/state/in_memory_state.py,sha256=m7UwCEYwDaUVnPq6AMegmFnHMx7C5ENKHT5GeVaWalM,13792
-flwr/server/superlink/state/sqlite_state.py,sha256=tLdpnXZ4KDdWTMXfCylRlu3pGrR156cGgDq9N_0TK0Y,33244
+flwr/server/superlink/state/in_memory_state.py,sha256=XL1cqXSnF87lpTprEtyl9aYRbDp2VuOiJdxvhTSvO18,14936
+flwr/server/superlink/state/sqlite_state.py,sha256=B1DOzHB9BpKLPmCSp4YyjcFspZpFpDsNJqCML6WQlhs,34259
 flwr/server/superlink/state/state.py,sha256=KpM894R8RE1N0b-s_Nlii6i0TDxj0DRkKa3Vf24Gt70,8127
 flwr/server/superlink/state/state_factory.py,sha256=Fo8pBQ1WWrVJK5TOEPZ_zgJE69_mfTGjTO6czh6571o,2021
 flwr/server/superlink/state/utils.py,sha256=OsF3OOoU4bU4PgLWkypX6EDoFs0L8RP_mHEBG-tVqGA,5227
@@ -299,8 +301,8 @@ flwr/superexec/exec_grpc.py,sha256=ZPq7EP55Vwj0kRcLVuTCokFqfIgBk-7YmDykZoMKi-c,1
 flwr/superexec/exec_servicer.py,sha256=TRpwPVl7eI0Y_xlCY6DmVpAo0yFU1gLwzyIeqFw9pyk,4746
 flwr/superexec/executor.py,sha256=-5J-ZLs-uArro3T2pCq0YQRC65cs18M888nufzdYE4E,2375
 flwr/superexec/simulation.py,sha256=J6pw-RqCSiUed8I_3MasZH4tl57ZmDebPAHNnbb0-vE,7420
-flwr_nightly-1.12.0.dev20241006.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
-flwr_nightly-1.12.0.dev20241006.dist-info/METADATA,sha256=OGFyy8F9HtsY-OAw8UZsNKncs_mkkG4NsNQvlAc0Mx4,15618
-flwr_nightly-1.12.0.dev20241006.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-flwr_nightly-1.12.0.dev20241006.dist-info/entry_points.txt,sha256=WUCbqhLEOzjx_lyATIM0-f0e8kOVaQjzwOvyOxHrMhs,434
-flwr_nightly-1.12.0.dev20241006.dist-info/RECORD,,
+flwr_nightly-1.12.0.dev20241008.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+flwr_nightly-1.12.0.dev20241008.dist-info/METADATA,sha256=zN8XXlFv6FErD9p0u6EZbxrZpYWCteNx8zJGkuPZMmM,15618
+flwr_nightly-1.12.0.dev20241008.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+flwr_nightly-1.12.0.dev20241008.dist-info/entry_points.txt,sha256=WUCbqhLEOzjx_lyATIM0-f0e8kOVaQjzwOvyOxHrMhs,434
+flwr_nightly-1.12.0.dev20241008.dist-info/RECORD,,